diff --git a/.github/.OwlBot.yaml b/.github/.OwlBot-hermetic.yaml similarity index 97% rename from .github/.OwlBot.yaml rename to .github/.OwlBot-hermetic.yaml index f8afbf407f5..ec923ff560e 100644 --- a/.github/.OwlBot.yaml +++ b/.github/.OwlBot-hermetic.yaml @@ -11,10 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -docker: - image: "gcr.io/cloud-devrel-public-resources/owlbot-java:latest" - deep-remove-regex: - "/grpc-google-.*/src" - "/proto-google-.*/src" diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml deleted file mode 100644 index 6983bb26347..00000000000 --- a/.github/.OwlBot.lock.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-docker: - image: gcr.io/cloud-devrel-public-resources/owlbot-java:latest - digest: sha256:25b384ee1674eda3984ec41c15b514a63bbeb5eda4d57c73c7e6f5adef2fd2f1 -# created: 2024-04-05T19:12:34.133475268Z diff --git a/.github/release-please.yml b/.github/release-please.yml index 67621e4ea6c..2853b1763cf 100644 --- a/.github/release-please.yml +++ b/.github/release-please.yml @@ -34,3 +34,11 @@ branches: bumpMinorPreMajor: true handleGHRelease: true branch: 6.55.x + - releaseType: java-backport + bumpMinorPreMajor: true + handleGHRelease: true + branch: 6.67.x + - releaseType: java-backport + bumpMinorPreMajor: true + handleGHRelease: true + branch: 6.66.x diff --git a/.github/scripts/update_generation_config.sh b/.github/scripts/update_generation_config.sh new file mode 100644 index 00000000000..561a313040f --- /dev/null +++ b/.github/scripts/update_generation_config.sh @@ -0,0 +1,121 @@ +#!/bin/bash +set -e +# This script should be run at the root of the repository. +# This script is used to update googleapis_commitish, gapic_generator_version, +# and libraries_bom_version in generation configuration at the time of running +# and create a pull request. + +# The following commands need to be installed before running the script: +# 1. git +# 2. gh +# 3. jq + +# Utility functions +# Get the latest released version of a Maven artifact. +function get_latest_released_version() { + local group_id=$1 + local artifact_id=$2 + latest=$(curl -s "https://search.maven.org/solrsearch/select?q=g:${group_id}+AND+a:${artifact_id}&core=gav&rows=500&wt=json" | jq -r '.response.docs[] | select(.v | test("^[0-9]+(\\.[0-9]+)*$")) | .v' | sort -V | tail -n 1) + echo "${latest}" +} + +# Update a key to a new value in the generation config. +function update_config() { + local key_word=$1 + local new_value=$2 + local file=$3 + echo "Update ${key_word} to ${new_value} in ${file}" + sed -i -e "s/^${key_word}.*$/${key_word}: ${new_value}/" "${file}" +} + +# The parameters of this script is: +# 1. 
base_branch, the base branch of the result pull request. +# 2. repo, organization/repo-name, e.g., googleapis/google-cloud-java +# 3. [optional] generation_config, the path to the generation configuration, +# the default value is generation_config.yaml in the repository root. +while [[ $# -gt 0 ]]; do +key="$1" +case "${key}" in + --base_branch) + base_branch="$2" + shift + ;; + --repo) + repo="$2" + shift + ;; + --generation_config) + generation_config="$2" + shift + ;; + *) + echo "Invalid option: [$1]" + exit 1 + ;; +esac +shift +done + +if [ -z "${base_branch}" ]; then + echo "missing required argument --base_branch" + exit 1 +fi + +if [ -z "${repo}" ]; then + echo "missing required argument --repo" + exit 1 +fi + +if [ -z "${generation_config}" ]; then + generation_config="generation_config.yaml" + echo "Use default generation config: ${generation_config}" +fi + +current_branch="generate-libraries-${base_branch}" +title="chore: Update generation configuration at $(date)" + +# try to find a open pull request associated with the branch +pr_num=$(gh pr list -s open -H "${current_branch}" -q . --json number | jq ".[] | .number") +# create a branch if there's no open pull request associated with the +# branch; otherwise checkout the pull request. +if [ -z "${pr_num}" ]; then + git checkout -b "${current_branch}" +else + gh pr checkout "${pr_num}" +fi + +mkdir tmp-googleapis +# use partial clone because only commit history is needed. 
+git clone --filter=blob:none https://github.com/googleapis/googleapis.git tmp-googleapis +pushd tmp-googleapis +git pull +latest_commit=$(git rev-parse HEAD) +popd +rm -rf tmp-googleapis +update_config "googleapis_commitish" "${latest_commit}" "${generation_config}" + +# update gapic-generator-java version to the latest +latest_version=$(get_latest_released_version "com.google.api" "gapic-generator-java") +update_config "gapic_generator_version" "${latest_version}" "${generation_config}" + +# update libraries-bom version to the latest +latest_version=$(get_latest_released_version "com.google.cloud" "libraries-bom") +update_config "libraries_bom_version" "${latest_version}" "${generation_config}" + +git add "${generation_config}" +changed_files=$(git diff --cached --name-only) +if [[ "${changed_files}" == "" ]]; then + echo "The latest generation config is not changed." + echo "Skip committing to the pull request." + exit 0 +fi +git commit -m "${title}" +if [ -z "${pr_num}" ]; then + git remote add remote_repo https://cloud-java-bot:"${GH_TOKEN}@github.com/${repo}.git" + git fetch -q --unshallow remote_repo + git push -f remote_repo "${current_branch}" + gh pr create --title "${title}" --head "${current_branch}" --body "${title}" --base "${base_branch}" +else + git push + gh pr edit "${pr_num}" --title "${title}" --body "${title}" +fi diff --git a/.github/sync-repo-settings.yaml b/.github/sync-repo-settings.yaml index 9a71e7679c2..7ab67b223ec 100644 --- a/.github/sync-repo-settings.yaml +++ b/.github/sync-repo-settings.yaml @@ -19,7 +19,6 @@ branchProtectionRules: - checkstyle - compile (8) - compile (11) - - OwlBot Post Processor - units-with-multiplexed-session (8) - units-with-multiplexed-session (11) - pattern: 3.3.x @@ -141,7 +140,25 @@ branchProtectionRules: - checkstyle - compile (8) - compile (11) - - OwlBot Post Processor + - pattern: 6.66.x + isAdminEnforced: true + requiredApprovingReviewCount: 1 + requiresCodeOwnerReviews: true + 
requiresStrictStatusChecks: false + requiredStatusCheckContexts: + - dependencies (17) + - lint + - javadoc + - units (8) + - units (11) + - 'Kokoro - Test: Integration' + - 'Kokoro - Test: Integration with Multiplexed Sessions' + - cla/google + - checkstyle + - compile (8) + - compile (11) + - units-with-multiplexed-session (8) + - units-with-multiplexed-session (11) permissionRules: - team: yoshi-admins permission: admin diff --git a/.github/workflows/approve-readme.yaml b/.github/workflows/approve-readme.yaml index f5fc7d5169e..59f00b8eb6e 100644 --- a/.github/workflows/approve-readme.yaml +++ b/.github/workflows/approve-readme.yaml @@ -21,7 +21,7 @@ jobs: runs-on: ubuntu-latest if: github.repository_owner == 'googleapis' && github.head_ref == 'autosynth-readme' steps: - - uses: actions/github-script@v6 + - uses: actions/github-script@v7 with: github-token: ${{secrets.YOSHI_APPROVER_TOKEN}} script: | diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 0e38c416770..7eca4c6d5f0 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -52,7 +52,7 @@ jobs: - run: .kokoro/build.sh env: JOB_TYPE: test - GOOGLE_CLOUD_SPANNER_ENABLE_MULTIPLEXED_SESSIONS: true + GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS: true units-java8: # Building using Java 17 and run the tests with Java 8 runtime name: "units (8)" @@ -91,7 +91,7 @@ jobs: - run: .kokoro/build.sh env: JOB_TYPE: test - GOOGLE_CLOUD_SPANNER_ENABLE_MULTIPLEXED_SESSIONS: true + GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS: true windows: runs-on: windows-latest steps: diff --git a/.github/workflows/hermetic_library_generation.yaml b/.github/workflows/hermetic_library_generation.yaml new file mode 100644 index 00000000000..9399ebef235 --- /dev/null +++ b/.github/workflows/hermetic_library_generation.yaml @@ -0,0 +1,45 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# GitHub action job to test core java library features on +# downstream client libraries before they are released. +name: Hermetic library generation upon generation config change through pull requests +on: + pull_request: + +env: + REPO_FULL_NAME: ${{ github.event.pull_request.head.repo.full_name }} + GITHUB_REPOSITORY: ${{ github.repository }} +jobs: + library_generation: + runs-on: ubuntu-latest + steps: + - name: Determine whether the pull request comes from a fork + run: | + if [[ "${GITHUB_REPOSITORY}" != "${REPO_FULL_NAME}" ]]; then + echo "This PR comes from a fork. Skip library generation." 
+ echo "SHOULD_RUN=false" >> $GITHUB_ENV + else + echo "SHOULD_RUN=true" >> $GITHUB_ENV + fi + - uses: actions/checkout@v4 + if: env.SHOULD_RUN == 'true' + with: + fetch-depth: 0 + token: ${{ secrets.CLOUD_JAVA_BOT_TOKEN }} + - uses: googleapis/sdk-platform-java/.github/scripts@v2.47.0 + if: env.SHOULD_RUN == 'true' + with: + base_ref: ${{ github.base_ref }} + head_ref: ${{ github.head_ref }} + token: ${{ secrets.CLOUD_JAVA_BOT_TOKEN }} diff --git a/.github/workflows/integration-tests-against-emulator-with-multiplexed-session.yaml b/.github/workflows/integration-tests-against-emulator-with-multiplexed-session.yaml index 741fecb089d..bd7dfef3972 100644 --- a/.github/workflows/integration-tests-against-emulator-with-multiplexed-session.yaml +++ b/.github/workflows/integration-tests-against-emulator-with-multiplexed-session.yaml @@ -39,4 +39,4 @@ jobs: env: JOB_TYPE: test SPANNER_EMULATOR_HOST: localhost:9010 - GOOGLE_CLOUD_SPANNER_ENABLE_MULTIPLEXED_SESSIONS: true + GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS: true diff --git a/.github/workflows/renovate_config_check.yaml b/.github/workflows/renovate_config_check.yaml index 87d8eb2be8c..7c5ec7865e1 100644 --- a/.github/workflows/renovate_config_check.yaml +++ b/.github/workflows/renovate_config_check.yaml @@ -14,7 +14,7 @@ jobs: uses: actions/checkout@v4 - name: Set up Node.js - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: node-version: '20' diff --git a/.github/workflows/unmanaged_dependency_check.yaml b/.github/workflows/unmanaged_dependency_check.yaml index f6594602a26..ce5a61f9f07 100644 --- a/.github/workflows/unmanaged_dependency_check.yaml +++ b/.github/workflows/unmanaged_dependency_check.yaml @@ -17,6 +17,6 @@ jobs: # repository .kokoro/build.sh - name: Unmanaged dependency check - uses: googleapis/sdk-platform-java/java-shared-dependencies/unmanaged-dependency-check@google-cloud-shared-dependencies/v3.30.0 + uses: 
googleapis/sdk-platform-java/java-shared-dependencies/unmanaged-dependency-check@google-cloud-shared-dependencies/v3.37.0 with: bom-path: google-cloud-spanner-bom/pom.xml diff --git a/.github/workflows/update_generation_config.yaml b/.github/workflows/update_generation_config.yaml new file mode 100644 index 00000000000..f15c807853d --- /dev/null +++ b/.github/workflows/update_generation_config.yaml @@ -0,0 +1,42 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# GitHub action job to test core java library features on +# downstream client libraries before they are released. 
+name: Update generation configuration +on: + schedule: + - cron: '0 2 * * *' + workflow_dispatch: + +jobs: + update-generation-config: + runs-on: ubuntu-24.04 + env: + # the branch into which the pull request is merged + base_branch: main + steps: + - uses: actions/checkout@v4 + with: + token: ${{ secrets.CLOUD_JAVA_BOT_TOKEN }} + - name: Update params in generation config to latest + shell: bash + run: | + set -x + [ -z "$(git config user.email)" ] && git config --global user.email "cloud-java-bot@google.com" + [ -z "$(git config user.name)" ] && git config --global user.name "cloud-java-bot" + bash .github/scripts/update_generation_config.sh \ + --base_branch "${base_branch}"\ + --repo ${{ github.repository }} + env: + GH_TOKEN: ${{ secrets.CLOUD_JAVA_BOT_TOKEN }} diff --git a/.kokoro/build.sh b/.kokoro/build.sh index d3eaf9922bb..f8ae5a96f37 100755 --- a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -48,6 +48,16 @@ if [[ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" && "${GOOGLE_APPLICATION_CREDENTI export GOOGLE_APPLICATION_CREDENTIALS=$(realpath ${KOKORO_GFILE_DIR}/${GOOGLE_APPLICATION_CREDENTIALS}) fi +# Start the Spanner emulator if the environment variable for it has been set. +# TODO: Change if statement once the env var can be set in the config. +#if [[ ! -z "${SPANNER_EMULATOR_HOST}" ]]; then +if [[ "$JOB_TYPE" == "graalvm" ]] || [[ "$JOB_TYPE" == "graalvm17" ]]; then + echo "Starting emulator" + export SPANNER_EMULATOR_HOST=localhost:9010 + docker pull gcr.io/cloud-spanner-emulator/emulator + docker run -d --rm --name spanner-emulator -p 9010:9010 -p 9020:9020 gcr.io/cloud-spanner-emulator/emulator +fi + # Kokoro integration test uses both JDK 11 and JDK 8. We ensure the generated class files # are compatible with Java 8 when running tests. if [ -n "${JAVA8_HOME}" ]; then @@ -233,6 +243,11 @@ clirr) ;; esac +if [[ ! 
-z "${SPANNER_EMULATOR_HOST}" ]]; then + echo "Stopping emulator" + docker container stop spanner-emulator +fi + if [ "${REPORT_COVERAGE}" == "true" ] then bash ${KOKORO_GFILE_DIR}/codecov.sh diff --git a/.kokoro/presubmit/graalvm-native-17.cfg b/.kokoro/presubmit/graalvm-native-17.cfg index b20ec8ff352..0f8b919c91f 100644 --- a/.kokoro/presubmit/graalvm-native-17.cfg +++ b/.kokoro/presubmit/graalvm-native-17.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.30.0" + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.37.0" } env_vars: { diff --git a/.kokoro/presubmit/graalvm-native.cfg b/.kokoro/presubmit/graalvm-native.cfg index aad0db97859..e6553bd6e41 100644 --- a/.kokoro/presubmit/graalvm-native.cfg +++ b/.kokoro/presubmit/graalvm-native.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.30.0" + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.37.0" } env_vars: { diff --git a/.kokoro/presubmit/integration-multiplexed-sessions-enabled.cfg b/.kokoro/presubmit/integration-multiplexed-sessions-enabled.cfg index 0acb1a445b0..771405de422 100644 --- a/.kokoro/presubmit/integration-multiplexed-sessions-enabled.cfg +++ b/.kokoro/presubmit/integration-multiplexed-sessions-enabled.cfg @@ -33,6 +33,6 @@ env_vars: { } env_vars: { - key: "GOOGLE_CLOUD_SPANNER_ENABLE_MULTIPLEXED_SESSIONS" + key: "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS" value: "true" -} \ No newline at end of file +} diff --git a/.readme-partials.yaml b/.readme-partials.yaml index 5c4e1db63b7..65ae24d8b58 100644 --- a/.readme-partials.yaml +++ b/.readme-partials.yaml @@ -144,12 +144,45 @@ custom_content: | .build() SpannerOptions options = SpannerOptions.newBuilder() - // Inject OpenTelemetry object via Spanner 
Options or register OpenTelmetry object as Global + // Inject OpenTelemetry object via Spanner Options or register OpenTelemetry object as Global .setOpenTelemetry(openTelemetry) .build(); Spanner spanner = options.getService(); ``` + + #### OpenTelemetry SQL Statement Tracing + The OpenTelemetry traces that are generated by the Java client include any request and transaction + tags that have been set. The traces can also include the SQL statements that are executed and the + name of the thread that executes the statement. Enable this with the `enableExtendedTracing` + option: + + ``` + SpannerOptions options = SpannerOptions.newBuilder() + .setOpenTelemetry(openTelemetry) + .setEnableExtendedTracing(true) + .build(); + ``` + + This option can also be enabled by setting the environment variable + `SPANNER_ENABLE_EXTENDED_TRACING=true`. + + #### OpenTelemetry API Tracing + You can enable tracing of each API call that the Spanner client executes with the `enableApiTracing` + option. These traces also include any retry attempts for an API call: + + ``` + SpannerOptions options = SpannerOptions.newBuilder() + .setOpenTelemetry(openTelemetry) + .setEnableApiTracing(true) + .build(); + ``` + + This option can also be enabled by setting the environment variable + `SPANNER_ENABLE_API_TRACING=true`. + + > Note: The attribute keys that are used for additional information about retry attempts and the number of requests might change in a future release. + ### Instrument with OpenCensus @@ -283,6 +316,38 @@ custom_content: | Spanner spanner = options.getService(); ``` + + #### OpenTelemetry SQL Statement Tracing + The OpenTelemetry traces that are generated by the Java client include any request and transaction + tags that have been set. The traces can also include the SQL statements that are executed and the + name of the thread that executes the statement. 
Enable this with the `enableExtendedTracing` + option: + + ``` + SpannerOptions options = SpannerOptions.newBuilder() + .setOpenTelemetry(openTelemetry) + .setEnableExtendedTracing(true) + .build(); + ``` + + This option can also be enabled by setting the environment variable + `SPANNER_ENABLE_EXTENDED_TRACING=true`. + + #### OpenTelemetry API Tracing + You can enable tracing of each API call that the Spanner client executes with the `enableApiTracing` + option. These traces also include any retry attempts for an API call: + + ``` + SpannerOptions options = SpannerOptions.newBuilder() + .setOpenTelemetry(openTelemetry) + .setEnableApiTracing(true) + .build(); + ``` + + This option can also be enabled by setting the environment variable + `SPANNER_ENABLE_API_TRACING=true`. + + > Note: The attribute keys that are used for additional information about retry attempts and the number of requests might change in a future release. ## Migrate from OpenCensus to OpenTelemetry diff --git a/.repo-metadata.json b/.repo-metadata.json index 80355fa2aff..7848b32f2b6 100644 --- a/.repo-metadata.json +++ b/.repo-metadata.json @@ -2,21 +2,20 @@ "api_shortname": "spanner", "name_pretty": "Cloud Spanner", "product_documentation": "https://cloud.google.com/spanner/docs/", + "api_description": "is a fully managed, mission-critical, relational database service that offers transactional consistency at global scale, \\nschemas, SQL (ANSI 2011 with extensions), and automatic, synchronous replication \\nfor high availability.\\n\\nBe sure to activate the Cloud Spanner API on the Developer's Console to\\nuse Cloud Spanner from your project.", "client_documentation": "https://cloud.google.com/java/docs/reference/google-cloud-spanner/latest/history", - "api_description": "is a fully managed, mission-critical, \nrelational database service that offers transactional consistency at global scale, \nschemas, SQL (ANSI 2011 with extensions), and automatic, synchronous replication \nfor high 
availability.\n\nBe sure to activate the Cloud Spanner API on the Developer's Console to\nuse Cloud Spanner from your project.", - "issue_tracker": "https://issuetracker.google.com/issues?q=componentid:190851%2B%20status:open", "release_level": "stable", + "transport": "grpc", "language": "java", - "min_java_version": 8, "repo": "googleapis/java-spanner", "repo_short": "java-spanner", "distribution_name": "com.google.cloud:google-cloud-spanner", "api_id": "spanner.googleapis.com", - "transport": "grpc", + "library_type": "GAPIC_COMBO", "requires_billing": true, "codeowner_team": "@googleapis/api-spanner-java", - "library_type": "GAPIC_COMBO", "excluded_poms": "google-cloud-spanner-bom", - "recommended_package": "com.google.cloud.spanner" -} - + "issue_tracker": "https://issuetracker.google.com/issues?q=componentid:190851%2B%20status:open", + "recommended_package": "com.google.cloud.spanner", + "min_java_version": 8 +} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index a878fe0500f..b7e2901228f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,239 @@ # Changelog +## [6.78.0](https://github.com/googleapis/java-spanner/compare/v6.77.0...v6.78.0) (2024-10-11) + + +### Features + +* Define ReplicaComputeCapacity and AsymmetricAutoscalingOption ([f46a6b3](https://github.com/googleapis/java-spanner/commit/f46a6b34383fe45d63b2db912389b26067f3a853)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.47.0 ([139a715](https://github.com/googleapis/java-spanner/commit/139a715d3f617b20a00b0cf4f5819e5a61a87c96)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-trace to v2.52.0 ([#3393](https://github.com/googleapis/java-spanner/issues/3393)) ([79453f9](https://github.com/googleapis/java-spanner/commit/79453f9985eda10631cd29ae58c0cedf234c2e18)) + +## [6.77.0](https://github.com/googleapis/java-spanner/compare/v6.76.0...v6.77.0) (2024-10-02) + + +### Features + 
+* Add INTERVAL API ([c078ac3](https://github.com/googleapis/java-spanner/commit/c078ac34c3d14b13bbd4a507de4f0013975dca4e)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-monitoring-v3 to v3.52.0 ([#3291](https://github.com/googleapis/java-spanner/issues/3291)) ([9241063](https://github.com/googleapis/java-spanner/commit/92410638b0ba88f8e89e28bd12dd58830f7aaeb3)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.52.0 ([#3292](https://github.com/googleapis/java-spanner/issues/3292)) ([da27a19](https://github.com/googleapis/java-spanner/commit/da27a1992e40b1b4591f0232f687d8031387e749)) +* Update dependency com.google.cloud:google-cloud-monitoring to v3.52.0 ([#3293](https://github.com/googleapis/java-spanner/issues/3293)) ([c6dbdb2](https://github.com/googleapis/java-spanner/commit/c6dbdb255eb4cd231a2dc7cef94bf3353fa7e837)) +* Update dependency com.google.cloud:google-cloud-trace to v2.51.0 ([#3294](https://github.com/googleapis/java-spanner/issues/3294)) ([a269747](https://github.com/googleapis/java-spanner/commit/a269747889ea0b2380f07e1efef3b288a9c4fd04)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.36.1 ([#3355](https://github.com/googleapis/java-spanner/issues/3355)) ([5191e71](https://github.com/googleapis/java-spanner/commit/5191e71a83a316b41564ce2604980c8f33135f2f)) +* Update dependency com.google.cloud.opentelemetry:exporter-metrics to v0.32.0 ([#3371](https://github.com/googleapis/java-spanner/issues/3371)) ([d5b5ca0](https://github.com/googleapis/java-spanner/commit/d5b5ca0cccc6cf73d759245d2bd72f33c7d39830)) +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.32.0 ([#3372](https://github.com/googleapis/java-spanner/issues/3372)) ([aa9a71d](https://github.com/googleapis/java-spanner/commit/aa9a71d38dabd8d1974bb553761e93735ade5c26)) +* Update dependency commons-io:commons-io to v2.17.0 
([#3349](https://github.com/googleapis/java-spanner/issues/3349)) ([7c21164](https://github.com/googleapis/java-spanner/commit/7c21164f2b8e75afab268f2fb8e132a372ac0d67)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.42.1 ([#3323](https://github.com/googleapis/java-spanner/issues/3323)) ([95dfc02](https://github.com/googleapis/java-spanner/commit/95dfc02ae2d65f99219dcced66cf4e74d1c4975b)) +* Update dependency ubuntu to v24 ([#3356](https://github.com/googleapis/java-spanner/issues/3356)) ([042c294](https://github.com/googleapis/java-spanner/commit/042c294cc5f83eebd2e3600cffb165e5b467d63e)) +* Update googleapis/sdk-platform-java action to v2.46.1 ([#3354](https://github.com/googleapis/java-spanner/issues/3354)) ([378f5cf](https://github.com/googleapis/java-spanner/commit/378f5cfb08d4e5ee80b21007bfc829de61bfbdbe)) +* Update junixsocket.version to v2.10.1 ([#3367](https://github.com/googleapis/java-spanner/issues/3367)) ([5f94915](https://github.com/googleapis/java-spanner/commit/5f94915941c4e4132f8460a04dde0643fa63ab99)) +* Update opentelemetry.version to v1.42.1 ([#3330](https://github.com/googleapis/java-spanner/issues/3330)) ([7b05e43](https://github.com/googleapis/java-spanner/commit/7b05e4301953364617691e8ae225cea823e3a323)) + + +### Documentation + +* Update comment for PROFILE QueryMode ([c078ac3](https://github.com/googleapis/java-spanner/commit/c078ac34c3d14b13bbd4a507de4f0013975dca4e)) + +## [6.76.0](https://github.com/googleapis/java-spanner/compare/v6.75.0...v6.76.0) (2024-09-27) + + +### Features + +* Add opt-in flag and ClientInterceptor to propagate trace context for Spanner end to end tracing ([#3162](https://github.com/googleapis/java-spanner/issues/3162)) ([0b7fdaf](https://github.com/googleapis/java-spanner/commit/0b7fdaf1d25e81ca8dd35a0f8d8caa7b77a7e58c)) +* Add samples for backup schedule feature APIs. 
([#3339](https://github.com/googleapis/java-spanner/issues/3339)) ([8cd5163](https://github.com/googleapis/java-spanner/commit/8cd516351e7859a81f00f17cb5071edbd804ea90)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.46.1 ([1719f44](https://github.com/googleapis/java-spanner/commit/1719f4465841354db3253fd132868394e530a82d)) + +## [6.75.0](https://github.com/googleapis/java-spanner/compare/v6.74.1...v6.75.0) (2024-09-19) + + +### Features + +* Support multiplexed session for blind write with single use transaction ([#3229](https://github.com/googleapis/java-spanner/issues/3229)) ([b3e2b0f](https://github.com/googleapis/java-spanner/commit/b3e2b0f4892951867715cb7f354c089fca4f050f)) + +## [6.74.1](https://github.com/googleapis/java-spanner/compare/v6.74.0...v6.74.1) (2024-09-16) + + +### Bug Fixes + +* Use core pool size 1 for maintainer ([#3314](https://github.com/googleapis/java-spanner/issues/3314)) ([cce008d](https://github.com/googleapis/java-spanner/commit/cce008d212535d32da990242973f7f517ca5d6dc)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.35.0 ([#3329](https://github.com/googleapis/java-spanner/issues/3329)) ([654835f](https://github.com/googleapis/java-spanner/commit/654835f2433b97665c74be9ec80c169ac905a720)) + +## [6.74.0](https://github.com/googleapis/java-spanner/compare/v6.73.0...v6.74.0) (2024-08-27) + + +### Features + +* **spanner:** Add edition field to the instance proto ([6b7e6ca](https://github.com/googleapis/java-spanner/commit/6b7e6ca109ea9679b5e36598d3c343fa40bff724)) + + +### Documentation + +* Change the example timestamps in Spanner Graph java sample code ([#3295](https://github.com/googleapis/java-spanner/issues/3295)) ([b6490b6](https://github.com/googleapis/java-spanner/commit/b6490b6a6ee2b7399431881a5e87b5ef7b577c89)) + +## 
[6.73.0](https://github.com/googleapis/java-spanner/compare/v6.72.0...v6.73.0) (2024-08-22) + + +### Features + +* Add option for cancelling queries when closing client ([#3276](https://github.com/googleapis/java-spanner/issues/3276)) ([95da1ed](https://github.com/googleapis/java-spanner/commit/95da1eddbc979f4ce78c9d1ac15bc4c1faba6dca)) + + +### Bug Fixes + +* Github workflow vulnerable to script injection ([#3232](https://github.com/googleapis/java-spanner/issues/3232)) ([599255c](https://github.com/googleapis/java-spanner/commit/599255c36d1fbe8317705a7eeb2a9e400c3efd15)) +* Make DecodeMode.DIRECT the deafult ([#3280](https://github.com/googleapis/java-spanner/issues/3280)) ([f31a95a](https://github.com/googleapis/java-spanner/commit/f31a95ab105407305e988e86c8f7b0d8654995e0)) +* Synchronize lazy ResultSet decoding ([#3267](https://github.com/googleapis/java-spanner/issues/3267)) ([4219cf8](https://github.com/googleapis/java-spanner/commit/4219cf86dba5e44d55f13ab118113f119c92b9e9)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.34.0 ([#3277](https://github.com/googleapis/java-spanner/issues/3277)) ([c449a91](https://github.com/googleapis/java-spanner/commit/c449a91628b005481996bce5ab449d62496a4d2d)) +* Update dependency commons-cli:commons-cli to v1.9.0 ([#3275](https://github.com/googleapis/java-spanner/issues/3275)) ([84790f7](https://github.com/googleapis/java-spanner/commit/84790f7d437e88739487b148bf963f0ac9dc3f96)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.41.0 ([#3269](https://github.com/googleapis/java-spanner/issues/3269)) ([a7458e9](https://github.com/googleapis/java-spanner/commit/a7458e970e4ca55ff3e312b2129e890576145db1)) +* Update dependency org.hamcrest:hamcrest to v3 ([#3271](https://github.com/googleapis/java-spanner/issues/3271)) 
([fc2e343](https://github.com/googleapis/java-spanner/commit/fc2e343dc06f80617a2cd6f2bea59b0631e70678)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.11.0 ([#3272](https://github.com/googleapis/java-spanner/issues/3272)) ([1bc0c46](https://github.com/googleapis/java-spanner/commit/1bc0c469b99ebf3778592b04dbf175b00bf5b06e)) +* Update opentelemetry.version to v1.41.0 ([#3270](https://github.com/googleapis/java-spanner/issues/3270)) ([88f6b56](https://github.com/googleapis/java-spanner/commit/88f6b56fb243bb17b814a7ae150c8f38dced119a)) + + +### Documentation + +* Create a few code snippets as examples for using Spanner Graph using Java ([#3234](https://github.com/googleapis/java-spanner/issues/3234)) ([61f0ab7](https://github.com/googleapis/java-spanner/commit/61f0ab7a48bc3e51b830534b1cfa70e40166ec91)) + +## [6.72.0](https://github.com/googleapis/java-spanner/compare/v6.71.0...v6.72.0) (2024-08-07) + + +### Features + +* Add `RESOURCE_EXHAUSTED` to the list of retryable error codes ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878)) +* Add field order_by in spanner.proto ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878)) +* Add QueryCancellationAction message in executor protos ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878)) +* Add SessionPoolOptions, SpannerOptions protos in executor protos ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878)) +* Add support for multi region encryption config ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878)) +* Enable hermetic library generation ([#3129](https://github.com/googleapis/java-spanner/issues/3129)) 
([94b2a86](https://github.com/googleapis/java-spanner/commit/94b2a8610ac02d2b4212c421f03b4e9561ec9949)) +* **spanner:** Add samples for instance partitions ([#3221](https://github.com/googleapis/java-spanner/issues/3221)) ([bc48bf2](https://github.com/googleapis/java-spanner/commit/bc48bf212e37441221b3b6c8742b07ff601f6c41)) +* **spanner:** Add support for Cloud Spanner Scheduled Backups ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878)) +* **spanner:** Adding `EXPECTED_FULFILLMENT_PERIOD` to indicate the instance creation times (with `FULFILLMENT_PERIOD_NORMAL` or `FULFILLMENT_PERIOD_EXTENDED` ENUM) with the extended instance creation time triggered by On-Demand Capacity Feature ([e859b29](https://github.com/googleapis/java-spanner/commit/e859b29ccf4e68b1ab62cffdd4cf197011ba9878)) +* **spanner:** Set manual affinity in case of gRPC-GCP extension ([#3215](https://github.com/googleapis/java-spanner/issues/3215)) ([86b306a](https://github.com/googleapis/java-spanner/commit/86b306a4189483a5fd2746052bed817443630567)) +* Support Read RPC OrderBy ([#3180](https://github.com/googleapis/java-spanner/issues/3180)) ([735bca5](https://github.com/googleapis/java-spanner/commit/735bca523e4ea53a24929fb2c27d282c41350e91)) + + +### Bug Fixes + +* Make sure commitAsync always finishes ([#3216](https://github.com/googleapis/java-spanner/issues/3216)) ([440c88b](https://github.com/googleapis/java-spanner/commit/440c88bd67e1c9d08445fe26b01bf243f7fd1ca4)) +* SessionPoolOptions.Builder#toBuilder() skipped useMultiplexedSessions ([#3197](https://github.com/googleapis/java-spanner/issues/3197)) ([027f92c](https://github.com/googleapis/java-spanner/commit/027f92cf32fee8217d2075db61fe0be58d43a40d)) + + +### Dependencies + +* Bump sdk-platform-java-config to 3.33.0 ([#3243](https://github.com/googleapis/java-spanner/issues/3243)) 
([35907c6](https://github.com/googleapis/java-spanner/commit/35907c63ae981612ba24dd9605db493b5b864217)) +* Update dependencies to latest ([#3250](https://github.com/googleapis/java-spanner/issues/3250)) ([d1d566b](https://github.com/googleapis/java-spanner/commit/d1d566b096915a537e0978715c81bfca00e34ceb)) +* Update dependency com.google.auto.value:auto-value-annotations to v1.11.0 ([#3191](https://github.com/googleapis/java-spanner/issues/3191)) ([065cd48](https://github.com/googleapis/java-spanner/commit/065cd489964aaee42fffe1e71327906bde907205)) +* Update dependency com.google.cloud:google-cloud-trace to v2.47.0 ([#3067](https://github.com/googleapis/java-spanner/issues/3067)) ([e336ab8](https://github.com/googleapis/java-spanner/commit/e336ab81a1d392d56386f9302bf51bf14e385dad)) + +## [6.71.0](https://github.com/googleapis/java-spanner/compare/v6.70.0...v6.71.0) (2024-07-03) + + +### Features + +* Include thread name in traces ([#3173](https://github.com/googleapis/java-spanner/issues/3173)) ([92b1e07](https://github.com/googleapis/java-spanner/commit/92b1e079e6093bc4a2e7b458c1bbe0f62a0fada9)) +* Support multiplexed sessions for RO transactions ([#3141](https://github.com/googleapis/java-spanner/issues/3141)) ([2b8e9ed](https://github.com/googleapis/java-spanner/commit/2b8e9ededc1ea1a5e8d4f90083f2cf862fcc198a)) + +## [6.70.0](https://github.com/googleapis/java-spanner/compare/v6.69.0...v6.70.0) (2024-06-27) + + +### Features + +* Add field order_by in spanner.proto ([#3064](https://github.com/googleapis/java-spanner/issues/3064)) ([52ee196](https://github.com/googleapis/java-spanner/commit/52ee1967ee3a37fb0482ad8b51c6e77e28b79844)) + + +### Bug Fixes + +* Do not end transaction span when rolling back to savepoint ([#3167](https://github.com/googleapis/java-spanner/issues/3167)) ([8ec0cf2](https://github.com/googleapis/java-spanner/commit/8ec0cf2032dece545c9e4d8a794b80d06550b710)) +* Remove unused 
DmlBatch span ([#3147](https://github.com/googleapis/java-spanner/issues/3147)) ([f7891c1](https://github.com/googleapis/java-spanner/commit/f7891c1ca42727c775cdbe91bff8d55191a3d799)) + + +### Dependencies + +* Update dependencies ([#3181](https://github.com/googleapis/java-spanner/issues/3181)) ([0c787e6](https://github.com/googleapis/java-spanner/commit/0c787e6fa67d2a259a76bbd2d7f1cfa20a1dbee8)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.32.0 ([#3184](https://github.com/googleapis/java-spanner/issues/3184)) ([9c85a6f](https://github.com/googleapis/java-spanner/commit/9c85a6fabea527253ea40a8970cc9071804d94c4)) +* Update dependency commons-cli:commons-cli to v1.8.0 ([#3073](https://github.com/googleapis/java-spanner/issues/3073)) ([36b5340](https://github.com/googleapis/java-spanner/commit/36b5340ef8bf197fbc8ed882f76caff9a6fe84b6)) + +## [6.69.0](https://github.com/googleapis/java-spanner/compare/v6.68.1...v6.69.0) (2024-06-12) + + +### Features + +* Add option to enable ApiTracer ([#3095](https://github.com/googleapis/java-spanner/issues/3095)) ([a0a4bc5](https://github.com/googleapis/java-spanner/commit/a0a4bc58d4269a8c1e5e76d9a0469f649bb69148)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.31.0 ([#3159](https://github.com/googleapis/java-spanner/issues/3159)) ([1ee19d1](https://github.com/googleapis/java-spanner/commit/1ee19d19c2db30d79c8741cc5739de1c69fb95f9)) + +## [6.68.1](https://github.com/googleapis/java-spanner/compare/v6.68.0...v6.68.1) (2024-05-29) + + +### Bug Fixes + +* Make SessionPoolOptions#setUseMultiplexedSession(boolean) package private ([#3130](https://github.com/googleapis/java-spanner/issues/3130)) ([575c3e0](https://github.com/googleapis/java-spanner/commit/575c3e01541e12294dd37a622f0b1dca52d200ba)) + +## [6.68.0](https://github.com/googleapis/java-spanner/compare/v6.67.0...v6.68.0) (2024-05-27) + + 
+### Features + +* [java] allow passing libraries_bom_version from env ([#1967](https://github.com/googleapis/java-spanner/issues/1967)) ([#3112](https://github.com/googleapis/java-spanner/issues/3112)) ([7d5a52c](https://github.com/googleapis/java-spanner/commit/7d5a52c19a4b8028b78fc64a10f1ba6127fa6ffe)) +* Allow DML batches in transactions to execute analyzeUpdate ([#3114](https://github.com/googleapis/java-spanner/issues/3114)) ([dee7cda](https://github.com/googleapis/java-spanner/commit/dee7cdabe74058434e4d630846f066dc82fdf512)) +* **spanner:** Add support for Proto Columns in Connection API ([#3123](https://github.com/googleapis/java-spanner/issues/3123)) ([7e7c814](https://github.com/googleapis/java-spanner/commit/7e7c814045dc84aaa57e7c716b0221e6cb19bcd1)) + + +### Bug Fixes + +* Allow getMetadata() calls before calling next() ([#3111](https://github.com/googleapis/java-spanner/issues/3111)) ([39902c3](https://github.com/googleapis/java-spanner/commit/39902c384f3f7f9438252cbee287f2428faf1440)) + + +### Dependencies + +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.10.2 ([#3117](https://github.com/googleapis/java-spanner/issues/3117)) ([ddebbbb](https://github.com/googleapis/java-spanner/commit/ddebbbbeef976f61f23cdd66c5f7c1f412e2f9bd)) + +## [6.67.0](https://github.com/googleapis/java-spanner/compare/v6.66.0...v6.67.0) (2024-05-22) + + +### Features + +* Add tracing for batchUpdate, executeUpdate, and connections ([#3097](https://github.com/googleapis/java-spanner/issues/3097)) ([45cdcfc](https://github.com/googleapis/java-spanner/commit/45cdcfcde02aa7976b017a90f81c2ccd28658c8f)) + + +### Performance Improvements + +* Minor optimizations to the standard query path ([#3101](https://github.com/googleapis/java-spanner/issues/3101)) ([ec820a1](https://github.com/googleapis/java-spanner/commit/ec820a16e2b3cb1a12a15231491b75cd73afaa13)) + + +### Dependencies + +* Update dependency 
com.google.cloud:google-cloud-monitoring to v3.44.0 ([#3099](https://github.com/googleapis/java-spanner/issues/3099)) ([da44e93](https://github.com/googleapis/java-spanner/commit/da44e932a39ac0124b63914f8ea926998c10ea2e)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.30.1 ([#3116](https://github.com/googleapis/java-spanner/issues/3116)) ([d205a73](https://github.com/googleapis/java-spanner/commit/d205a73714786a609673012b771e7a0722b3e1f2)) + ## [6.66.0](https://github.com/googleapis/java-spanner/compare/v6.65.1...v6.66.0) (2024-05-03) diff --git a/README.md b/README.md index fd69927635d..4f12f6e18e0 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ If you are using Maven with [BOM][libraries-bom], add this to your pom.xml file: com.google.cloud libraries-bom - 26.37.0 + 26.48.0 pom import @@ -36,13 +36,12 @@ If you are using Maven with [BOM][libraries-bom], add this to your pom.xml file: If you are using Maven without the BOM, add this to your dependencies: - ```xml com.google.cloud google-cloud-spanner - 6.65.1 + 6.76.0 ``` @@ -50,22 +49,21 @@ If you are using Maven without the BOM, add this to your dependencies: If you are using Gradle 5.x or later, add this to your dependencies: ```Groovy -implementation platform('com.google.cloud:libraries-bom:26.38.0') +implementation platform('com.google.cloud:libraries-bom:26.48.0') implementation 'com.google.cloud:google-cloud-spanner' ``` If you are using Gradle without BOM, add this to your dependencies: ```Groovy -implementation 'com.google.cloud:google-cloud-spanner:6.66.0' +implementation 'com.google.cloud:google-cloud-spanner:6.78.0' ``` If you are using SBT, add this to your dependencies: ```Scala -libraryDependencies += "com.google.cloud" % "google-cloud-spanner" % "6.66.0" +libraryDependencies += "com.google.cloud" % "google-cloud-spanner" % "6.78.0" ``` - ## Authentication @@ -93,13 +91,7 @@ to add `google-cloud-spanner` as a dependency in your code. 
## About Cloud Spanner -[Cloud Spanner][product-docs] is a fully managed, mission-critical, -relational database service that offers transactional consistency at global scale, -schemas, SQL (ANSI 2011 with extensions), and automatic, synchronous replication -for high availability. - -Be sure to activate the Cloud Spanner API on the Developer's Console to -use Cloud Spanner from your project. +[Cloud Spanner][product-docs] is a fully managed, mission-critical, relational database service that offers transactional consistency at global scale, schemas, SQL (ANSI 2011 with extensions), and automatic, synchronous replication for high availability. Be sure to activate the Cloud Spanner API on the Developer's Console to use Cloud Spanner from your project. See the [Cloud Spanner client library docs][javadocs] to learn how to use this Cloud Spanner Client Library. @@ -250,13 +242,46 @@ OpenTelemetry openTelemetry = OpenTelemetrySdk.builder() .build() SpannerOptions options = SpannerOptions.newBuilder() -// Inject OpenTelemetry object via Spanner Options or register OpenTelmetry object as Global +// Inject OpenTelemetry object via Spanner Options or register OpenTelemetry object as Global .setOpenTelemetry(openTelemetry) .build(); Spanner spanner = options.getService(); ``` +#### OpenTelemetry SQL Statement Tracing +The OpenTelemetry traces that are generated by the Java client include any request and transaction +tags that have been set. The traces can also include the SQL statements that are executed and the +name of the thread that executes the statement. Enable this with the `enableExtendedTracing` +option: + +``` +SpannerOptions options = SpannerOptions.newBuilder() + .setOpenTelemetry(openTelemetry) + .setEnableExtendedTracing(true) + .build(); +``` + +This option can also be enabled by setting the environment variable +`SPANNER_ENABLE_EXTENDED_TRACING=true`. 
+ +#### OpenTelemetry API Tracing +You can enable tracing of each API call that the Spanner client executes with the `enableApiTracing` +option. These traces also include any retry attempts for an API call: + +``` +SpannerOptions options = SpannerOptions.newBuilder() +.setOpenTelemetry(openTelemetry) +.setEnableApiTracing(true) +.build(); +``` + +This option can also be enabled by setting the environment variable +`SPANNER_ENABLE_API_TRACING=true`. + +> Note: The attribute keys that are used for additional information about retry attempts and the number of requests might change in a future release. + + ### Instrument with OpenCensus > Note: OpenCensus project is deprecated. See [Sunsetting OpenCensus](https://opentelemetry.io/blog/2023/sunsetting-opencensus/). @@ -390,6 +415,38 @@ SpannerOptions options = SpannerOptions.newBuilder() Spanner spanner = options.getService(); ``` +#### OpenTelemetry SQL Statement Tracing +The OpenTelemetry traces that are generated by the Java client include any request and transaction +tags that have been set. The traces can also include the SQL statements that are executed and the +name of the thread that executes the statement. Enable this with the `enableExtendedTracing` +option: + +``` +SpannerOptions options = SpannerOptions.newBuilder() + .setOpenTelemetry(openTelemetry) + .setEnableExtendedTracing(true) + .build(); +``` + +This option can also be enabled by setting the environment variable +`SPANNER_ENABLE_EXTENDED_TRACING=true`. + +#### OpenTelemetry API Tracing +You can enable tracing of each API call that the Spanner client executes with the `enableApiTracing` +option. These traces also include any retry attempts for an API call: + +``` +SpannerOptions options = SpannerOptions.newBuilder() +.setOpenTelemetry(openTelemetry) +.setEnableApiTracing(true) +.build(); +``` + +This option can also be enabled by setting the environment variable +`SPANNER_ENABLE_API_TRACING=true`. 
+ +> Note: The attribute keys that are used for additional information about retry attempts and the number of requests might change in a future release. + ## Migrate from OpenCensus to OpenTelemetry > Using the [OpenTelemetry OpenCensus Bridge](https://mvnrepository.com/artifact/io.opentelemetry/opentelemetry-opencensus-shim), you can immediately begin exporting your metrics and traces with OpenTelemetry @@ -429,13 +486,11 @@ Samples are in the [`samples/`](https://github.com/googleapis/java-spanner/tree/ | Sample | Source Code | Try it | | --------------------------- | --------------------------------- | ------ | -| Database Operations | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/native-image/src/main/java/com/example/spanner/DatabaseOperations.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/native-image/src/main/java/com/example/spanner/DatabaseOperations.java) | -| Instance Operations | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/native-image/src/main/java/com/example/spanner/InstanceOperations.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/native-image/src/main/java/com/example/spanner/InstanceOperations.java) | -| Native Image Spanner Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/native-image/src/main/java/com/example/spanner/NativeImageSpannerSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/native-image/src/main/java/com/example/spanner/NativeImageSpannerSample.java) | | Add And Drop Database Role | [source 
code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/AddAndDropDatabaseRole.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AddAndDropDatabaseRole.java) | | Add Json Column Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/AddJsonColumnSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AddJsonColumnSample.java) | | Add Jsonb Column Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/AddJsonbColumnSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AddJsonbColumnSample.java) | | Add Numeric Column Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/AddNumericColumnSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AddNumericColumnSample.java) | +| Add Proto Column Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/AddProtoColumnSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AddProtoColumnSample.java) | | Alter Sequence Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/AlterSequenceSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AlterSequenceSample.java) | | Alter Table With Foreign Key Delete Cascade Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/AlterTableWithForeignKeyDeleteCascadeSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AlterTableWithForeignKeyDeleteCascadeSample.java) | | Async Dml Example | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/AsyncDmlExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/AsyncDmlExample.java) | @@ -454,23 +509,29 @@ Samples are in the [`samples/`](https://github.com/googleapis/java-spanner/tree/ | Create Database With Default Leader Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithDefaultLeaderSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithDefaultLeaderSample.java) | | Create Database With Encryption Key | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithEncryptionKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithEncryptionKey.java) | | Create Database With Version Retention Period Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithVersionRetentionPeriodSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateDatabaseWithVersionRetentionPeriodSample.java) | +| Create Full Backup Schedule Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateFullBackupScheduleSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateFullBackupScheduleSample.java) | +| Create Incremental Backup Schedule Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateIncrementalBackupScheduleSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateIncrementalBackupScheduleSample.java) | | Create Instance Config Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateInstanceConfigSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateInstanceConfigSample.java) | | Create Instance Example | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateInstanceExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateInstanceExample.java) | +| Create Instance Partition Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateInstancePartitionSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateInstancePartitionSample.java) | | Create Instance With Autoscaling Config Example | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAutoscalingConfigExample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAutoscalingConfigExample.java) | | Create Instance With Processing Units Example | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithProcessingUnitsExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithProcessingUnitsExample.java) | | Create Sequence Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateSequenceSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateSequenceSample.java) | | Create Table With Foreign Key Delete Cascade Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/CreateTableWithForeignKeyDeleteCascadeSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CreateTableWithForeignKeyDeleteCascadeSample.java) | | Custom Timeout And Retry Settings Example | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/CustomTimeoutAndRetrySettingsExample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/CustomTimeoutAndRetrySettingsExample.java) | +| Delete Backup Schedule Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/DeleteBackupScheduleSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/DeleteBackupScheduleSample.java) | | Delete Instance Config Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/DeleteInstanceConfigSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/DeleteInstanceConfigSample.java) | | Delete Using Dml Returning Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/DeleteUsingDmlReturningSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/DeleteUsingDmlReturningSample.java) | | Directed Read Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/DirectedReadSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/DirectedReadSample.java) | | Drop Foreign Key Constraint Delete Cascade Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/DropForeignKeyConstraintDeleteCascadeSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/DropForeignKeyConstraintDeleteCascadeSample.java) | | Drop Sequence Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/DropSequenceSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/DropSequenceSample.java) | | Enable Fine Grained Access | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/EnableFineGrainedAccess.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/EnableFineGrainedAccess.java) | +| Get Backup Schedule Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/GetBackupScheduleSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/GetBackupScheduleSample.java) | | Get Commit Stats Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/GetCommitStatsSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/GetCommitStatsSample.java) | | Get Database Ddl Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/GetDatabaseDdlSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/GetDatabaseDdlSample.java) | | Get Instance Config Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/GetInstanceConfigSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/GetInstanceConfigSample.java) | | Insert Using Dml Returning Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/InsertUsingDmlReturningSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/InsertUsingDmlReturningSample.java) | +| List Backup 
Schedules Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/ListBackupSchedulesSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/ListBackupSchedulesSample.java) | | List Database Roles | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/ListDatabaseRoles.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/ListDatabaseRoles.java) | | List Databases Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/ListDatabasesSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/ListDatabasesSample.java) | | List Instance Config Operations Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/ListInstanceConfigOperationsSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/ListInstanceConfigOperationsSample.java) | @@ -494,21 +555,28 @@ Samples are in the [`samples/`](https://github.com/googleapis/java-spanner/tree/ | Query With Json Parameter Sample | [source 
code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/QueryWithJsonParameterSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/QueryWithJsonParameterSample.java) | | Query With Jsonb Parameter Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/QueryWithJsonbParameterSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/QueryWithJsonbParameterSample.java) | | Query With Numeric Parameter Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/QueryWithNumericParameterSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/QueryWithNumericParameterSample.java) | +| Query With Proto Parameter Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/QueryWithProtoParameterSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/QueryWithProtoParameterSample.java) | | Quickstart Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/QuickstartSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/QuickstartSample.java) | | Read Data With Database Role | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/ReadDataWithDatabaseRole.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/ReadDataWithDatabaseRole.java) | | Restore Backup With Encryption Key | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/RestoreBackupWithEncryptionKey.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/RestoreBackupWithEncryptionKey.java) | | Set Max Commit Delay Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/SetMaxCommitDelaySample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/SetMaxCommitDelaySample.java) | +| Singer Proto | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/SingerProto.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/SingerProto.java) | +| Spanner Graph Sample | [source 
code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/SpannerGraphSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/SpannerGraphSample.java) | | Spanner Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/SpannerSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/SpannerSample.java) | | Statement Timeout Example | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/StatementTimeoutExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/StatementTimeoutExample.java) | | Tag Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/TagSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/TagSample.java) | | Tracing Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/TracingSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/TracingSample.java) | | Transaction Timeout Example | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/TransactionTimeoutExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/TransactionTimeoutExample.java) | +| Update Backup Schedule Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateBackupScheduleSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateBackupScheduleSample.java) | | Update Database Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateDatabaseSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateDatabaseSample.java) | | Update Database With Default Leader Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateDatabaseWithDefaultLeaderSample.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateDatabaseWithDefaultLeaderSample.java) | | Update Instance Config Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceConfigSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateInstanceConfigSample.java) | +| Update Instance Example | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateInstanceExample.java) | | Update Json Data Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateJsonDataSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateJsonDataSample.java) | | Update Jsonb Data Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateJsonbDataSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateJsonbDataSample.java) | | Update Numeric 
Data Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateNumericDataSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateNumericDataSample.java) | +| Update Proto Data Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSample.java) | +| Update Proto Data Sample Using Dml | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSampleUsingDml.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSampleUsingDml.java) | | Update Using Dml Returning Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/UpdateUsingDmlReturningSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/UpdateUsingDmlReturningSample.java) | | Add And Drop Database Role | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddAndDropDatabaseRole.java) | [![Open in Cloud 
Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/AddAndDropDatabaseRole.java) | | Add Json Column Sample | [source code](https://github.com/googleapis/java-spanner/blob/main/samples/snippets/src/main/java/com/example/spanner/admin/archived/AddJsonColumnSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-spanner&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/spanner/admin/archived/AddJsonColumnSample.java) | @@ -651,7 +719,7 @@ Java is a registered trademark of Oracle and/or its affiliates. [kokoro-badge-link-5]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-spanner/java11.html [stability-image]: https://img.shields.io/badge/stability-stable-green [maven-version-image]: https://img.shields.io/maven-central/v/com.google.cloud/google-cloud-spanner.svg -[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-spanner/6.66.0 +[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-spanner/6.78.0 [authentication]: https://github.com/googleapis/google-cloud-java#authentication [auth-scopes]: https://developers.google.com/identity/protocols/oauth2/scopes [predefined-iam-roles]: https://cloud.google.com/iam/docs/understanding-roles#predefined_roles diff --git a/benchmarks/pom.xml b/benchmarks/pom.xml index b6883cb775c..1ab983259a7 100644 --- a/benchmarks/pom.xml +++ b/benchmarks/pom.xml @@ -24,7 +24,7 @@ com.google.cloud google-cloud-spanner-parent - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT @@ -33,8 +33,8 @@ 1.8 UTF-8 UTF-8 - 2.9.1 - 1.36.0 + 2.10.1 + 1.42.1 @@ -49,12 +49,12 @@ com.google.cloud.opentelemetry exporter-trace - 0.25.2 + 0.33.0 com.google.cloud.opentelemetry exporter-metrics - 0.25.2 + 0.33.0 
@@ -85,24 +85,24 @@ io.opentelemetry opentelemetry-bom - 1.37.0 + 1.42.1 pom import com.google.cloud google-cloud-spanner - 6.66.0 + 6.76.0 commons-cli commons-cli - 1.6.0 + 1.9.0 com.google.auto.value auto-value-annotations - 1.10.4 + 1.11.0 com.kohlschutter.junixsocket @@ -118,7 +118,7 @@ commons-cli commons-cli - 1.7.0 + 1.9.0 @@ -133,7 +133,7 @@ org.codehaus.mojo exec-maven-plugin - 3.2.0 + 3.4.1 com.google.cloud.spanner.benchmark.LatencyBenchmark false diff --git a/generation_config.yaml b/generation_config.yaml new file mode 100644 index 00000000000..54b3a1cff92 --- /dev/null +++ b/generation_config.yaml @@ -0,0 +1,28 @@ +gapic_generator_version: 2.47.0 +googleapis_commitish: de509e38d37a2a9d8b95e1ce78831189f4f3c0f4 +libraries_bom_version: 26.48.0 +libraries: + - api_shortname: spanner + name_pretty: Cloud Spanner + product_documentation: https://cloud.google.com/spanner/docs/ + client_documentation: https://cloud.google.com/java/docs/reference/google-cloud-spanner/latest/history + api_description: is a fully managed, mission-critical, relational database service that offers transactional consistency at global scale, \nschemas, SQL (ANSI 2011 with extensions), and automatic, synchronous replication \nfor high availability.\n\nBe sure to activate the Cloud Spanner API on the Developer's Console to\nuse Cloud Spanner from your project. 
+ issue_tracker: https://issuetracker.google.com/issues?q=componentid:190851%2B%20status:open + release_level: stable + language: java + min_java_version: 8 + repo: googleapis/java-spanner + repo_short: java-spanner + distribution_name: com.google.cloud:google-cloud-spanner + api_id: spanner.googleapis.com + transport: grpc + requires_billing: true + codeowner_team: '@googleapis/api-spanner-java' + library_type: GAPIC_COMBO + excluded_poms: google-cloud-spanner-bom + recommended_package: com.google.cloud.spanner + GAPICs: + - proto_path: google/spanner/admin/database/v1 + - proto_path: google/spanner/admin/instance/v1 + - proto_path: google/spanner/executor/v1 + - proto_path: google/spanner/v1 diff --git a/google-cloud-spanner-bom/pom.xml b/google-cloud-spanner-bom/pom.xml index da3025651a4..40894e1919b 100644 --- a/google-cloud-spanner-bom/pom.xml +++ b/google-cloud-spanner-bom/pom.xml @@ -3,12 +3,12 @@ 4.0.0 com.google.cloud google-cloud-spanner-bom - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT pom com.google.cloud sdk-platform-java-config - 3.30.0 + 3.37.0 Google Cloud Spanner BOM @@ -53,43 +53,43 @@ com.google.cloud google-cloud-spanner - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT com.google.cloud google-cloud-spanner test-jar - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT com.google.api.grpc grpc-google-cloud-spanner-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT com.google.api.grpc grpc-google-cloud-spanner-admin-instance-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT com.google.api.grpc grpc-google-cloud-spanner-admin-database-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT com.google.api.grpc proto-google-cloud-spanner-admin-instance-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT com.google.api.grpc proto-google-cloud-spanner-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT com.google.api.grpc proto-google-cloud-spanner-admin-database-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT diff --git a/google-cloud-spanner-executor/assembly-descriptor.xml b/google-cloud-spanner-executor/assembly-descriptor.xml deleted file mode 100644 index 
8a9e7f8f500..00000000000 --- a/google-cloud-spanner-executor/assembly-descriptor.xml +++ /dev/null @@ -1,27 +0,0 @@ - - jar-with-dependencies - - jar - - false - - - / - false - true - - - io.grpc.LoadBalancerProvider - - - - - - - ${project.build.outputDirectory} - . - - - diff --git a/google-cloud-spanner-executor/pom.xml b/google-cloud-spanner-executor/pom.xml index c56d3a375fa..73b9a329389 100644 --- a/google-cloud-spanner-executor/pom.xml +++ b/google-cloud-spanner-executor/pom.xml @@ -5,14 +5,14 @@ 4.0.0 com.google.cloud google-cloud-spanner-executor - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT jar Google Cloud Spanner Executor com.google.cloud google-cloud-spanner-parent - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT @@ -129,12 +129,12 @@ commons-cli commons-cli - 1.7.0 + 1.9.0 commons-io commons-io - 2.16.1 + 2.17.0 @@ -160,35 +160,66 @@ - google-spanner-cloud-executor - maven-assembly-plugin - 3.7.1 + maven-resources-plugin + + + copy-resources + validate + + copy-resources + + + ${project.build.directory}/spanner-executor + + + resources + true + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + copy-dependencies + prepare-package + + copy-dependencies + + + ${project.build.directory}/spanner-executor/lib + false + false + true + + + + + + org.apache.maven.plugins + maven-jar-plugin - - assembly-descriptor.xml - + spanner-executor/google-spanner-cloud-executor + false com.google.cloud.executor.spanner.WorkerProxy + true + lib/ - - - make-assembly - package - - single - - - org.apache.maven.plugins maven-failsafe-plugin - 3.2.5 + 3.5.0 diff --git a/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudClientExecutor.java b/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudClientExecutor.java index 6d8ef262454..d180f55d06a 100644 --- a/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudClientExecutor.java +++ 
b/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudClientExecutor.java @@ -75,6 +75,7 @@ import com.google.cloud.spanner.encryption.CustomerManagedEncryption; import com.google.cloud.spanner.v1.stub.SpannerStubSettings; import com.google.common.base.Function; +import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -191,6 +192,16 @@ public CloudClientExecutor(boolean enableGrpcFaultInjector) { this.enableGrpcFaultInjector = enableGrpcFaultInjector; } + // Helper for unexpected results. + public static String unexpectedExceptionResponse(Exception e) { + return "Unexpected error in Github Cloud Java Client Executor: " + + e + + " Msg: " + + e.getMessage() + + " Stack: " + + Joiner.on("\n").join(e.getStackTrace()); + } + /** * Implementation of a ReadWriteTransaction, which is a wrapper of the cloud TransactionRunner. It * stores all the status and related variables from the start to finish, and control the running @@ -792,10 +803,13 @@ private synchronized Spanner getClient(long timeoutSeconds, boolean useMultiplex .setTotalTimeout(rpcTimeout) .build(); - com.google.cloud.spanner.SessionPoolOptions sessionPoolOptions = - SessionPoolOptionsHelper.setUseMultiplexedSession( - com.google.cloud.spanner.SessionPoolOptions.newBuilder(), useMultiplexedSession) - .build(); + com.google.cloud.spanner.SessionPoolOptions.Builder poolOptionsBuilder = + com.google.cloud.spanner.SessionPoolOptions.newBuilder(); + SessionPoolOptionsHelper.setUseMultiplexedSession( + com.google.cloud.spanner.SessionPoolOptions.newBuilder(), useMultiplexedSession); + SessionPoolOptionsHelper.setUseMultiplexedSessionBlindWrite( + com.google.cloud.spanner.SessionPoolOptions.newBuilder(), useMultiplexedSession); + com.google.cloud.spanner.SessionPoolOptions sessionPoolOptions = poolOptionsBuilder.build(); // Cloud Spanner Client does not 
support global retry settings, // Thus, we need to add retry settings to each individual stub. SpannerOptions.Builder optionsBuilder = @@ -1083,7 +1097,7 @@ private Status executeCreateCloudInstance( return sender.finishWithError( toStatus( SpannerExceptionFactory.newSpannerException( - ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage()))); + ErrorCode.INVALID_ARGUMENT, CloudClientExecutor.unexpectedExceptionResponse(e)))); } return sender.finishWithOK(); } @@ -2665,6 +2679,7 @@ private Status processResults( executionContext.finishRead(Status.OK); return sender.finishWithOK(); } catch (SpannerException e) { + LOGGER.log(Level.WARNING, "Encountered exception: ", e); Status status = toStatus(e); LOGGER.log( Level.WARNING, diff --git a/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutor.java b/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutor.java index cda9923f392..537a6ed4c33 100644 --- a/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutor.java +++ b/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutor.java @@ -416,39 +416,48 @@ public Status sendOutcome(SpannerActionOutcome outcome) { /** Map Cloud ErrorCode to Status. 
*/ protected Status toStatus(SpannerException e) { + String errorMessage = e.getMessage(); + com.google.rpc.Status rpcStatus = io.grpc.protobuf.StatusProto.fromThrowable(e); + if (rpcStatus != null) { + if (rpcStatus.getDetailsCount() > 0) { + errorMessage += "/n"; + } + for (int i = 0; i < rpcStatus.getDetailsCount(); i++) { + errorMessage += "\nError detail: " + rpcStatus.getDetails(i).toString(); + } + } switch (e.getErrorCode()) { case INVALID_ARGUMENT: - return Status.fromCode(Status.INVALID_ARGUMENT.getCode()).withDescription(e.getMessage()); + return Status.fromCode(Status.INVALID_ARGUMENT.getCode()).withDescription(errorMessage); case PERMISSION_DENIED: - return Status.fromCode(Status.PERMISSION_DENIED.getCode()).withDescription(e.getMessage()); + return Status.fromCode(Status.PERMISSION_DENIED.getCode()).withDescription(errorMessage); case ABORTED: - return Status.fromCode(Status.ABORTED.getCode()).withDescription(e.getMessage()); + return Status.fromCode(Status.ABORTED.getCode()).withDescription(errorMessage); case ALREADY_EXISTS: - return Status.fromCode(Status.ALREADY_EXISTS.getCode()).withDescription(e.getMessage()); + return Status.fromCode(Status.ALREADY_EXISTS.getCode()).withDescription(errorMessage); case CANCELLED: - return Status.fromCode(Status.CANCELLED.getCode()).withDescription(e.getMessage()); + return Status.fromCode(Status.CANCELLED.getCode()).withDescription(errorMessage); case INTERNAL: return Status.fromCode(Status.INTERNAL.getCode()) - .withDescription(e.getMessage() + e.getReason() == null ? "" : ": " + e.getReason()); + .withDescription(errorMessage + e.getReason() == null ? 
"" : ": " + e.getReason()); case FAILED_PRECONDITION: - return Status.fromCode(Status.FAILED_PRECONDITION.getCode()) - .withDescription(e.getMessage()); + return Status.fromCode(Status.FAILED_PRECONDITION.getCode()).withDescription(errorMessage); case NOT_FOUND: - return Status.fromCode(Status.NOT_FOUND.getCode()).withDescription(e.getMessage()); + return Status.fromCode(Status.NOT_FOUND.getCode()).withDescription(errorMessage); case DEADLINE_EXCEEDED: - return Status.fromCode(Status.DEADLINE_EXCEEDED.getCode()).withDescription(e.getMessage()); + return Status.fromCode(Status.DEADLINE_EXCEEDED.getCode()).withDescription(errorMessage); case RESOURCE_EXHAUSTED: - return Status.fromCode(Status.RESOURCE_EXHAUSTED.getCode()).withDescription(e.getMessage()); + return Status.fromCode(Status.RESOURCE_EXHAUSTED.getCode()).withDescription(errorMessage); case OUT_OF_RANGE: - return Status.fromCode(Status.OUT_OF_RANGE.getCode()).withDescription(e.getMessage()); + return Status.fromCode(Status.OUT_OF_RANGE.getCode()).withDescription(errorMessage); case UNAUTHENTICATED: - return Status.fromCode(Status.UNAUTHENTICATED.getCode()).withDescription(e.getMessage()); + return Status.fromCode(Status.UNAUTHENTICATED.getCode()).withDescription(errorMessage); case UNIMPLEMENTED: - return Status.fromCode(Status.UNIMPLEMENTED.getCode()).withDescription(e.getMessage()); + return Status.fromCode(Status.UNIMPLEMENTED.getCode()).withDescription(errorMessage); case UNAVAILABLE: - return Status.fromCode(Status.UNAVAILABLE.getCode()).withDescription(e.getMessage()); + return Status.fromCode(Status.UNAVAILABLE.getCode()).withDescription(errorMessage); case UNKNOWN: - return Status.fromCode(Status.UNKNOWN.getCode()).withDescription(e.getMessage()); + return Status.fromCode(Status.UNKNOWN.getCode()).withDescription(errorMessage); default: return Status.fromCode(Status.UNKNOWN.getCode()) .withDescription("Unsupported Spanner error code: " + e.getErrorCode()); diff --git 
a/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/SessionPoolOptionsHelper.java b/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/SessionPoolOptionsHelper.java index 8f978a39a31..dafaa4a1f31 100644 --- a/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/SessionPoolOptionsHelper.java +++ b/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/SessionPoolOptionsHelper.java @@ -30,4 +30,12 @@ public static SessionPoolOptions.Builder setUseMultiplexedSession( SessionPoolOptions.Builder sessionPoolOptionsBuilder, boolean useMultiplexedSession) { return sessionPoolOptionsBuilder.setUseMultiplexedSession(useMultiplexedSession); } + + // TODO: Remove when multiplexed session for blind write is released. + public static SessionPoolOptions.Builder setUseMultiplexedSessionBlindWrite( + SessionPoolOptions.Builder sessionPoolOptionsBuilder, + boolean useMultiplexedSessionBlindWrite) { + return sessionPoolOptionsBuilder.setUseMultiplexedSessionBlindWrite( + useMultiplexedSessionBlindWrite); + } } diff --git a/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/SpannerExecutorProxySettings.java b/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/SpannerExecutorProxySettings.java index b8b364469fe..f24a2f2bdc3 100644 --- a/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/SpannerExecutorProxySettings.java +++ b/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/SpannerExecutorProxySettings.java @@ -49,7 +49,9 @@ *

The builder of this class is recursive, so contained classes are themselves builders. When * build() is called, the tree of builders is called to create the complete settings object. * - *

For example, to set the total timeout of executeActionAsync to 30 seconds: + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of executeActionAsync: * *

{@code
  * // This snippet has been automatically generated and should be regarded as a code template only.
@@ -66,11 +68,22 @@
  *             .executeActionAsyncSettings()
  *             .getRetrySettings()
  *             .toBuilder()
- *             .setTotalTimeout(Duration.ofSeconds(30))
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
  *             .build());
  * SpannerExecutorProxySettings spannerExecutorProxySettings =
  *     spannerExecutorProxySettingsBuilder.build();
  * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for + * additional support in setting retries. */ @Generated("by gapic-generator-java") public class SpannerExecutorProxySettings extends ClientSettings { diff --git a/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/SpannerExecutorProxyStubSettings.java b/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/SpannerExecutorProxyStubSettings.java index 2b8c17ada97..fbcf8bada1f 100644 --- a/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/SpannerExecutorProxyStubSettings.java +++ b/google-cloud-spanner-executor/src/main/java/com/google/cloud/spanner/executor/v1/stub/SpannerExecutorProxyStubSettings.java @@ -17,6 +17,7 @@ package com.google.cloud.spanner.executor.v1.stub; import com.google.api.core.ApiFunction; +import com.google.api.core.ObsoleteApi; import com.google.api.gax.core.GaxProperties; import com.google.api.gax.core.GoogleCredentialsProvider; import com.google.api.gax.core.InstantiatingExecutorProvider; @@ -57,7 +58,9 @@ *

The builder of this class is recursive, so contained classes are themselves builders. When * build() is called, the tree of builders is called to create the complete settings object. * - *

For example, to set the total timeout of executeActionAsync to 30 seconds: + *

For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of executeActionAsync: * *

{@code
  * // This snippet has been automatically generated and should be regarded as a code template only.
@@ -74,11 +77,22 @@
  *             .executeActionAsyncSettings()
  *             .getRetrySettings()
  *             .toBuilder()
- *             .setTotalTimeout(Duration.ofSeconds(30))
+ *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
+ *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
+ *             .setMaxAttempts(5)
+ *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
+ *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
+ *             .setRetryDelayMultiplier(1.3)
+ *             .setRpcTimeoutMultiplier(1.5)
+ *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
  *             .build());
  * SpannerExecutorProxyStubSettings spannerExecutorProxySettings =
  *     spannerExecutorProxySettingsBuilder.build();
  * }
+ * + * Please refer to the [Client Side Retry + * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for + * additional support in setting retries. */ @Generated("by gapic-generator-java") public class SpannerExecutorProxyStubSettings @@ -119,6 +133,7 @@ public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuild } /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") public static String getDefaultEndpoint() { return "spanner-cloud-executor.googleapis.com:443"; } diff --git a/google-cloud-spanner-executor/src/main/resources/META-INF/native-image/com.google.cloud.spanner.executor.v1/reflect-config.json b/google-cloud-spanner-executor/src/main/resources/META-INF/native-image/com.google.cloud.spanner.executor.v1/reflect-config.json index b2933abb24e..4fe624b7aa7 100644 --- a/google-cloud-spanner-executor/src/main/resources/META-INF/native-image/com.google.cloud.spanner.executor.v1/reflect-config.json +++ b/google-cloud-spanner-executor/src/main/resources/META-INF/native-image/com.google.cloud.spanner.executor.v1/reflect-config.json @@ -305,6 +305,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.api.ResourceDescriptor", "queryAllDeclaredConstructors": true, @@ -1709,6 +1727,42 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": 
"com.google.spanner.admin.database.v1.BackupSchedule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupSchedule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupScheduleSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupScheduleSpec$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig", "queryAllDeclaredConstructors": true, @@ -1835,6 +1889,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.CreateDatabaseMetadata", "queryAllDeclaredConstructors": true, @@ -1871,6 
+1943,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.CrontabSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CrontabSpec$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.Database", "queryAllDeclaredConstructors": true, @@ -1961,6 +2051,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.DropDatabaseRequest", "queryAllDeclaredConstructors": true, @@ -2024,6 +2132,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.FullBackupSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.FullBackupSpec$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.GetBackupRequest", "queryAllDeclaredConstructors": true, @@ -2042,6 +2168,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.GetBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.GetDatabaseDdlRequest", "queryAllDeclaredConstructors": true, @@ -2096,6 +2240,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.IncrementalBackupSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.IncrementalBackupSpec$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.ListBackupOperationsRequest", "queryAllDeclaredConstructors": true, @@ -2132,6 +2294,42 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.ListBackupsRequest", "queryAllDeclaredConstructors": true, @@ -2420,6 +2618,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata", "queryAllDeclaredConstructors": true, @@ -2501,6 +2717,42 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": 
"com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption$AutoscalingConfigOverrides", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption$AutoscalingConfigOverrides$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AutoscalingLimits", "queryAllDeclaredConstructors": true, @@ -2789,6 +3041,15 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.instance.v1.Instance$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.instance.v1.Instance$State", "queryAllDeclaredConstructors": true, @@ -3041,6 +3302,60 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": 
"com.google.spanner.admin.instance.v1.MoveInstanceMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.instance.v1.OperationProgress", "queryAllDeclaredConstructors": true, @@ -3059,6 +3374,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaComputeCapacity", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaComputeCapacity$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.instance.v1.ReplicaInfo", "queryAllDeclaredConstructors": true, @@ -3086,6 +3419,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaSelection", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaSelection$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata", "queryAllDeclaredConstructors": true, @@ -4238,6 +4589,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.executor.v1.QueryCancellationAction", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.executor.v1.QueryCancellationAction$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.executor.v1.QueryResult", 
"queryAllDeclaredConstructors": true, @@ -5102,6 +5471,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.v1.MultiplexedSessionPrecommitToken", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.MultiplexedSessionPrecommitToken$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.v1.Mutation", "queryAllDeclaredConstructors": true, @@ -5363,6 +5750,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.v1.ReadRequest$LockHint", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ReadRequest$OrderBy", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.v1.RequestOptions", "queryAllDeclaredConstructors": true, diff --git a/google-cloud-spanner/clirr-ignored-differences.xml b/google-cloud-spanner/clirr-ignored-differences.xml index 92dcab6e2ce..8e7a392302a 100644 --- a/google-cloud-spanner/clirr-ignored-differences.xml +++ b/google-cloud-spanner/clirr-ignored-differences.xml @@ -337,6 +337,12 @@ com/google/cloud/spanner/spi/v1/GapicSpannerRpc com.google.cloud.spanner.spi.v1.SpannerRpc$StreamingCall read(com.google.spanner.v1.ReadRequest, com.google.cloud.spanner.spi.v1.SpannerRpc$ResultStreamConsumer, java.util.Map) + + 7005 + 
com/google/cloud/spanner/spi/v1/GapicSpannerRpc + com.google.api.gax.longrunning.OperationFuture updateDatabaseDdl(java.lang.String, java.lang.Iterable, java.lang.String) + com.google.api.gax.longrunning.OperationFuture updateDatabaseDdl(com.google.cloud.spanner.Database, java.lang.Iterable, java.lang.String) + 7005 com/google/cloud/spanner/spi/v1/GapicSpannerRpc @@ -387,6 +393,12 @@ com/google/cloud/spanner/spi/v1/SpannerRpc com.google.cloud.spanner.spi.v1.SpannerRpc$StreamingCall read(com.google.spanner.v1.ReadRequest, com.google.cloud.spanner.spi.v1.SpannerRpc$ResultStreamConsumer, java.util.Map) + + 7005 + com/google/cloud/spanner/spi/v1/SpannerRpc + com.google.api.gax.longrunning.OperationFuture updateDatabaseDdl(java.lang.String, java.lang.Iterable, java.lang.String) + com.google.api.gax.longrunning.OperationFuture updateDatabaseDdl(com.google.cloud.spanner.Database, java.lang.Iterable, java.lang.String) + 7005 com/google/cloud/spanner/spi/v1/SpannerRpc @@ -656,7 +668,7 @@ com/google/cloud/spanner/connection/Connection com.google.cloud.spanner.Spanner getSpanner() - + 7012 @@ -669,4 +681,81 @@ com.google.cloud.spanner.connection.DdlInTransactionMode getDdlInTransactionMode() + + + 7012 + com/google/cloud/spanner/SpannerOptions$SpannerEnvironment + boolean isEnableExtendedTracing() + + + + + 7012 + com/google/cloud/spanner/SpannerOptions$SpannerEnvironment + boolean isEnableApiTracing() + + + + + 7012 + com/google/cloud/spanner/SpannerOptions$SpannerEnvironment + boolean isEnableEndToEndTracing() + + + + + 7012 + com/google/cloud/spanner/SpannerOptions$SpannerEnvironment + boolean isEnableBuiltInMetrics() + + + + + 7012 + com/google/cloud/spanner/connection/Connection + boolean isExcludeTxnFromChangeStreams() + + + 7012 + com/google/cloud/spanner/connection/Connection + void setExcludeTxnFromChangeStreams(boolean) + + + + + 7012 + com/google/cloud/spanner/connection/Connection + byte[] getProtoDescriptors() + + + 7012 + 
com/google/cloud/spanner/connection/Connection + void setProtoDescriptors(byte[]) + + + + + 7009 + com/google/cloud/spanner/SessionPoolOptions$Builder + com.google.cloud.spanner.SessionPoolOptions$Builder setUseMultiplexedSession(boolean) + + + + + 7012 + com/google/cloud/spanner/connection/Connection + void reset() + + + + + 7012 + com/google/cloud/spanner/connection/Connection + void setKeepTransactionAlive(boolean) + + + 7012 + com/google/cloud/spanner/connection/Connection + boolean isKeepTransactionAlive() + diff --git a/google-cloud-spanner/pom.xml b/google-cloud-spanner/pom.xml index 9a855db9e49..2e5763a2a28 100644 --- a/google-cloud-spanner/pom.xml +++ b/google-cloud-spanner/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.google.cloud google-cloud-spanner - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT jar Google Cloud Spanner https://github.com/googleapis/java-spanner @@ -11,12 +11,11 @@ com.google.cloud google-cloud-spanner-parent - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT google-cloud-spanner 0.31.1 - 1.37.0 com.google.cloud.spanner.GceTestEnvConfig projects/gcloud-devel/instances/spanner-testing-east1 gcloud-devel @@ -108,7 +107,7 @@ com.google.cloud.spanner.ParallelIntegrationTest - 8 + 12 true com.google.cloud.spanner.ParallelIntegrationTest @@ -208,19 +207,10 @@ com.google.api.grpc proto-google-common-protos - - com.google.api.grpc - proto-google-cloud-spanner-executor-v1 - com.google.api.grpc grpc-google-common-protos - - com.google.api.grpc - grpc-google-cloud-spanner-executor-v1 - test - com.google.api.grpc proto-google-iam-v1 @@ -256,6 +246,32 @@ io.opentelemetry opentelemetry-context + + io.opentelemetry + opentelemetry-sdk + + + io.opentelemetry + opentelemetry-sdk-common + + + io.opentelemetry + opentelemetry-sdk-metrics + + + com.google.cloud.opentelemetry + detector-resources-support + + + com.google.cloud + google-cloud-monitoring + 3.53.0 + + + com.google.api.grpc + proto-google-cloud-monitoring-v3 + 3.53.0 + com.google.auth google-auth-library-oauth2-http @@ -342,6 
+358,12 @@ ${graal-sdk.version} provided + + org.graalvm.sdk + nativeimage + ${graal-sdk.version} + provided + @@ -349,6 +371,18 @@ junit test + + + + com.google.api.grpc + proto-google-cloud-spanner-executor-v1 + provided + + + com.google.api.grpc + grpc-google-cloud-spanner-executor-v1 + provided + @@ -394,7 +428,7 @@ org.hamcrest hamcrest - 2.2 + 3.0 test @@ -411,28 +445,14 @@ test - - io.opentelemetry - opentelemetry-sdk - ${opentelemetry.version} - test - - - io.opentelemetry - opentelemetry-sdk-metrics - ${opentelemetry.version} - test - io.opentelemetry opentelemetry-sdk-trace - ${opentelemetry.version} test io.opentelemetry opentelemetry-sdk-testing - ${opentelemetry.version} test @@ -579,5 +599,18 @@
+ + executor-tests + + + com.google.api.grpc + proto-google-cloud-spanner-executor-v1 + + + com.google.api.grpc + grpc-google-cloud-spanner-executor-v1 + + + diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractMultiplexedSessionDatabaseClient.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractMultiplexedSessionDatabaseClient.java index 92035a18418..27253bf1e13 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractMultiplexedSessionDatabaseClient.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractMultiplexedSessionDatabaseClient.java @@ -53,13 +53,7 @@ public CommitResponse writeWithOptions(Iterable mutations, Transaction @Override public Timestamp writeAtLeastOnce(Iterable mutations) throws SpannerException { - throw new UnsupportedOperationException(); - } - - @Override - public CommitResponse writeAtLeastOnceWithOptions( - Iterable mutations, TransactionOption... options) throws SpannerException { - throw new UnsupportedOperationException(); + return writeAtLeastOnceWithOptions(mutations).getCommitTimestamp(); } @Override diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractReadContext.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractReadContext.java index 4d17ba4e1b7..caf0e06379e 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractReadContext.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractReadContext.java @@ -36,6 +36,7 @@ import com.google.cloud.spanner.SessionClient.SessionOption; import com.google.cloud.spanner.SessionImpl.SessionTransaction; import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Option; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; @@ -68,6 +69,7 @@ abstract class 
AbstractReadContext abstract static class Builder, T extends AbstractReadContext> { private SessionImpl session; + private boolean cancelQueryWhenClientIsClosed; private SpannerRpc rpc; private ISpan span; private TraceWrapper tracer; @@ -90,6 +92,11 @@ B setSession(SessionImpl session) { return self(); } + B setCancelQueryWhenClientIsClosed(boolean cancelQueryWhenClientIsClosed) { + this.cancelQueryWhenClientIsClosed = cancelQueryWhenClientIsClosed; + return self(); + } + B setRpc(SpannerRpc rpc) { this.rpc = rpc; return self(); @@ -183,7 +190,7 @@ static Builder newBuilder() { @GuardedBy("lock") private boolean used; - private final Map channelHint; + private Map channelHint; private SingleReadContext(Builder builder) { super(builder); @@ -226,6 +233,16 @@ TransactionSelector getTransactionSelector() { Map getTransactionChannelHint() { return channelHint; } + + @Override + boolean prepareRetryOnDifferentGrpcChannel() { + if (session.getIsMultiplexed() && channelHint.get(Option.CHANNEL_HINT) != null) { + long channelHintForTransaction = Option.CHANNEL_HINT.getLong(channelHint) + 1L; + channelHint = optionMap(SessionOption.channelHint(channelHintForTransaction)); + return true; + } + return super.prepareRetryOnDifferentGrpcChannel(); + } } private static void assertTimestampAvailable(boolean available) { @@ -439,6 +456,7 @@ void initTransaction() { final Object lock = new Object(); final SessionImpl session; + final boolean cancelQueryWhenClientIsClosed; final SpannerRpc rpc; final ExecutorProvider executorProvider; ISpan span; @@ -457,7 +475,7 @@ void initTransaction() { // A per-transaction sequence number used to identify this ExecuteSqlRequests. Required for DML, // ignored for query by the server. - private AtomicLong seqNo = new AtomicLong(); + private final AtomicLong seqNo = new AtomicLong(); // Allow up to 512MB to be buffered (assuming 1MB chunks). In practice, restart tokens are sent // much more frequently. 
@@ -468,6 +486,7 @@ void initTransaction() { AbstractReadContext(Builder builder) { this.session = builder.session; + this.cancelQueryWhenClientIsClosed = builder.cancelQueryWhenClientIsClosed; this.rpc = builder.rpc; this.defaultPrefetchChunks = builder.defaultPrefetchChunks; this.defaultQueryOptions = builder.defaultQueryOptions; @@ -488,6 +507,10 @@ long getSeqNo() { return seqNo.incrementAndGet(); } + protected boolean isReadOnly() { + return true; + } + protected boolean isRouteToLeader() { return false; } @@ -622,19 +645,18 @@ private ResultSet executeQueryInternal( @VisibleForTesting QueryOptions buildQueryOptions(QueryOptions requestOptions) { // Shortcut for the most common return value. - if (defaultQueryOptions.equals(QueryOptions.getDefaultInstance()) && requestOptions == null) { - return QueryOptions.getDefaultInstance(); - } - // Create a builder based on the default query options. - QueryOptions.Builder builder = defaultQueryOptions.toBuilder(); - // Then overwrite with specific options for this query. - if (requestOptions != null) { - builder.mergeFrom(requestOptions); + if (requestOptions == null) { + return defaultQueryOptions; } - return builder.build(); + return defaultQueryOptions.toBuilder().mergeFrom(requestOptions).build(); } RequestOptions buildRequestOptions(Options options) { + // Shortcut for the most common return value. 
+ if (!(options.hasPriority() || options.hasTag() || getTransactionTag() != null)) { + return RequestOptions.getDefaultInstance(); + } + RequestOptions.Builder builder = RequestOptions.newBuilder(); if (options.hasPriority()) { builder.setPriority(options.priority()); @@ -655,16 +677,7 @@ ExecuteSqlRequest.Builder getExecuteSqlRequestBuilder( .setSql(statement.getSql()) .setQueryMode(queryMode) .setSession(session.getName()); - Map stmtParameters = statement.getParameters(); - if (!stmtParameters.isEmpty()) { - com.google.protobuf.Struct.Builder paramsBuilder = builder.getParamsBuilder(); - for (Map.Entry param : stmtParameters.entrySet()) { - paramsBuilder.putFields(param.getKey(), Value.toProto(param.getValue())); - if (param.getValue() != null && param.getValue().getType() != null) { - builder.putParamTypes(param.getKey(), param.getValue().getType().toProto()); - } - } - } + addParameters(builder, statement.getParameters()); if (withTransactionSelector) { TransactionSelector selector = getTransactionSelector(); if (selector != null) { @@ -679,12 +692,26 @@ ExecuteSqlRequest.Builder getExecuteSqlRequestBuilder( } else if (defaultDirectedReadOptions != null) { builder.setDirectedReadOptions(defaultDirectedReadOptions); } - builder.setSeqno(getSeqNo()); + if (!isReadOnly()) { + builder.setSeqno(getSeqNo()); + } builder.setQueryOptions(buildQueryOptions(statement.getQueryOptions())); builder.setRequestOptions(buildRequestOptions(options)); return builder; } + static void addParameters(ExecuteSqlRequest.Builder builder, Map stmtParameters) { + if (!stmtParameters.isEmpty()) { + com.google.protobuf.Struct.Builder paramsBuilder = builder.getParamsBuilder(); + for (Map.Entry param : stmtParameters.entrySet()) { + paramsBuilder.putFields(param.getKey(), Value.toProto(param.getValue())); + if (param.getValue() != null && param.getValue().getType() != null) { + builder.putParamTypes(param.getKey(), param.getValue().getType().toProto()); + } + } + } + } + 
ExecuteBatchDmlRequest.Builder getExecuteBatchDmlRequestBuilder( Iterable statements, Options options) { ExecuteBatchDmlRequest.Builder builder = @@ -735,11 +762,14 @@ ResultSet executeQueryInternalWithOptions( SpannerImpl.QUERY, span, tracer, + tracer.createStatementAttributes(statement, options), + session.getErrorHandler(), rpc.getExecuteQueryRetrySettings(), rpc.getExecuteQueryRetryableCodes()) { @Override CloseableIterator startStream(@Nullable ByteString resumeToken) { - GrpcStreamIterator stream = new GrpcStreamIterator(statement, prefetchChunks); + GrpcStreamIterator stream = + new GrpcStreamIterator(statement, prefetchChunks, cancelQueryWhenClientIsClosed); if (partitionToken != null) { request.setPartitionToken(partitionToken); } @@ -764,6 +794,11 @@ CloseableIterator startStream(@Nullable ByteString resumeToken stream.setCall(call, request.getTransaction().hasBegin()); return stream; } + + @Override + boolean prepareIteratorForRetryOnDifferentGrpcChannel() { + return AbstractReadContext.this.prepareRetryOnDifferentGrpcChannel(); + } }; return new GrpcResultSet( stream, this, options.hasDecodeMode() ? options.decodeMode() : defaultDecodeMode); @@ -830,6 +865,10 @@ public void close() { */ abstract Map getTransactionChannelHint(); + boolean prepareRetryOnDifferentGrpcChannel() { + return false; + } + /** * Returns the transaction tag for this {@link AbstractReadContext} or null if this * {@link AbstractReadContext} does not have a transaction tag. 
@@ -892,6 +931,9 @@ ResultSet readInternalWithOptions( if (readOptions.hasDataBoostEnabled()) { builder.setDataBoostEnabled(readOptions.dataBoostEnabled()); } + if (readOptions.hasOrderBy()) { + builder.setOrderBy(readOptions.orderBy()); + } if (readOptions.hasDirectedReadOptions()) { builder.setDirectedReadOptions(readOptions.directedReadOptions()); } else if (defaultDirectedReadOptions != null) { @@ -905,11 +947,13 @@ ResultSet readInternalWithOptions( SpannerImpl.READ, span, tracer, + session.getErrorHandler(), rpc.getReadRetrySettings(), rpc.getReadRetryableCodes()) { @Override CloseableIterator startStream(@Nullable ByteString resumeToken) { - GrpcStreamIterator stream = new GrpcStreamIterator(prefetchChunks); + GrpcStreamIterator stream = + new GrpcStreamIterator(prefetchChunks, cancelQueryWhenClientIsClosed); TransactionSelector selector = null; if (resumeToken != null) { builder.setResumeToken(resumeToken); @@ -932,6 +976,11 @@ CloseableIterator startStream(@Nullable ByteString resumeToken stream.setCall(call, /* withBeginTransaction = */ builder.getTransaction().hasBegin()); return stream; } + + @Override + boolean prepareIteratorForRetryOnDifferentGrpcChannel() { + return AbstractReadContext.this.prepareRetryOnDifferentGrpcChannel(); + } }; return new GrpcResultSet( stream, this, readOptions.hasDecodeMode() ? 
readOptions.decodeMode() : defaultDecodeMode); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchClientImpl.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchClientImpl.java index 22fb9f710c1..3d886dd383b 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchClientImpl.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchClientImpl.java @@ -54,6 +54,7 @@ public BatchReadOnlyTransaction batchReadOnlyTransaction(TimestampBound bound) { return new BatchReadOnlyTransactionImpl( MultiUseReadOnlyTransaction.newBuilder() .setSession(session) + .setCancelQueryWhenClientIsClosed(true) .setRpc(sessionClient.getSpanner().getRpc()) .setTimestampBound(bound) .setDefaultQueryOptions( @@ -75,6 +76,7 @@ public BatchReadOnlyTransaction batchReadOnlyTransaction(BatchTransactionId batc return new BatchReadOnlyTransactionImpl( MultiUseReadOnlyTransaction.newBuilder() .setSession(session) + .setCancelQueryWhenClientIsClosed(true) .setRpc(sessionClient.getSpanner().getRpc()) .setTransactionId(batchTransactionId.getTransactionId()) .setTimestamp(batchTransactionId.getTimestamp()) diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsConstant.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsConstant.java new file mode 100644 index 00000000000..4f8b091d550 --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInMetricsConstant.java @@ -0,0 +1,172 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.InternalApi; +import com.google.api.gax.tracing.OpenTelemetryMetricsRecorder; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.sdk.metrics.Aggregation; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.View; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +@InternalApi +public class BuiltInMetricsConstant { + + public static final String METER_NAME = "spanner.googleapis.com/internal/client"; + + public static final String GAX_METER_NAME = OpenTelemetryMetricsRecorder.GAX_METER_NAME; + + static final String OPERATION_LATENCIES_NAME = "operation_latencies"; + static final String ATTEMPT_LATENCIES_NAME = "attempt_latencies"; + static final String OPERATION_LATENCY_NAME = "operation_latency"; + static final String ATTEMPT_LATENCY_NAME = "attempt_latency"; + static final String OPERATION_COUNT_NAME = "operation_count"; + static final String ATTEMPT_COUNT_NAME = "attempt_count"; + + public static final Set SPANNER_METRICS = + ImmutableSet.of( + OPERATION_LATENCIES_NAME, + ATTEMPT_LATENCIES_NAME, + OPERATION_COUNT_NAME, + ATTEMPT_COUNT_NAME) + .stream() + .map(m -> METER_NAME + '/' + m) + .collect(Collectors.toSet()); + + public static final String SPANNER_RESOURCE_TYPE = 
"spanner_instance_client"; + + public static final AttributeKey PROJECT_ID_KEY = AttributeKey.stringKey("project_id"); + public static final AttributeKey INSTANCE_ID_KEY = AttributeKey.stringKey("instance_id"); + public static final AttributeKey LOCATION_ID_KEY = AttributeKey.stringKey("location"); + public static final AttributeKey INSTANCE_CONFIG_ID_KEY = + AttributeKey.stringKey("instance_config"); + public static final AttributeKey CLIENT_HASH_KEY = AttributeKey.stringKey("client_hash"); + + // These metric labels will be promoted to the spanner monitored resource fields + public static final Set> SPANNER_PROMOTED_RESOURCE_LABELS = + ImmutableSet.of( + PROJECT_ID_KEY, + INSTANCE_ID_KEY, + INSTANCE_CONFIG_ID_KEY, + LOCATION_ID_KEY, + CLIENT_HASH_KEY); + + public static final AttributeKey DATABASE_KEY = AttributeKey.stringKey("database"); + public static final AttributeKey CLIENT_UID_KEY = AttributeKey.stringKey("client_uid"); + public static final AttributeKey CLIENT_NAME_KEY = AttributeKey.stringKey("client_name"); + public static final AttributeKey METHOD_KEY = AttributeKey.stringKey("method"); + public static final AttributeKey STATUS_KEY = AttributeKey.stringKey("status"); + public static final AttributeKey DIRECT_PATH_ENABLED_KEY = + AttributeKey.stringKey("directpath_enabled"); + public static final AttributeKey DIRECT_PATH_USED_KEY = + AttributeKey.stringKey("directpath_used"); + + // IP address prefixes allocated for DirectPath backends. 
+ public static final String DP_IPV6_PREFIX = "2001:4860:8040"; + public static final String DP_IPV4_PREFIX = "34.126"; + + public static final Set COMMON_ATTRIBUTES = + ImmutableSet.of( + PROJECT_ID_KEY, + INSTANCE_ID_KEY, + LOCATION_ID_KEY, + INSTANCE_CONFIG_ID_KEY, + CLIENT_UID_KEY, + CLIENT_HASH_KEY, + METHOD_KEY, + STATUS_KEY, + DATABASE_KEY, + CLIENT_NAME_KEY, + DIRECT_PATH_ENABLED_KEY, + DIRECT_PATH_USED_KEY); + + static Aggregation AGGREGATION_WITH_MILLIS_HISTOGRAM = + Aggregation.explicitBucketHistogram( + ImmutableList.of( + 0.0, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, + 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 25.0, 30.0, 40.0, 50.0, 65.0, 80.0, 100.0, 130.0, + 160.0, 200.0, 250.0, 300.0, 400.0, 500.0, 650.0, 800.0, 1000.0, 2000.0, 5000.0, + 10000.0, 20000.0, 50000.0, 100000.0, 200000.0, 400000.0, 800000.0, 1600000.0, + 3200000.0)); + + static Map getAllViews() { + ImmutableMap.Builder views = ImmutableMap.builder(); + defineView( + views, + BuiltInMetricsConstant.OPERATION_LATENCY_NAME, + BuiltInMetricsConstant.OPERATION_LATENCIES_NAME, + BuiltInMetricsConstant.AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms"); + defineView( + views, + BuiltInMetricsConstant.ATTEMPT_LATENCY_NAME, + BuiltInMetricsConstant.ATTEMPT_LATENCIES_NAME, + BuiltInMetricsConstant.AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms"); + defineView( + views, + BuiltInMetricsConstant.OPERATION_COUNT_NAME, + BuiltInMetricsConstant.OPERATION_COUNT_NAME, + Aggregation.sum(), + InstrumentType.COUNTER, + "1"); + defineView( + views, + BuiltInMetricsConstant.ATTEMPT_COUNT_NAME, + BuiltInMetricsConstant.ATTEMPT_COUNT_NAME, + Aggregation.sum(), + InstrumentType.COUNTER, + "1"); + return views.build(); + } + + private static void defineView( + ImmutableMap.Builder viewMap, + String metricName, + String metricViewName, + Aggregation aggregation, + InstrumentType type, + String unit) { + InstrumentSelector selector = + 
InstrumentSelector.builder() + .setName(BuiltInMetricsConstant.METER_NAME + '/' + metricName) + .setMeterName(BuiltInMetricsConstant.GAX_METER_NAME) + .setType(type) + .setUnit(unit) + .build(); + Set attributesFilter = + BuiltInMetricsConstant.COMMON_ATTRIBUTES.stream() + .map(AttributeKey::getKey) + .collect(Collectors.toSet()); + View view = + View.builder() + .setName(BuiltInMetricsConstant.METER_NAME + '/' + metricViewName) + .setAggregation(aggregation) + .setAttributeFilter(attributesFilter) + .build(); + viewMap.put(selector, view); + } +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInOpenTelemetryMetricsProvider.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInOpenTelemetryMetricsProvider.java new file mode 100644 index 00000000000..a7665f8556a --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInOpenTelemetryMetricsProvider.java @@ -0,0 +1,177 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.opentelemetry.detection.GCPPlatformDetector.SupportedPlatform.GOOGLE_KUBERNETES_ENGINE; +import static com.google.cloud.spanner.BuiltInMetricsConstant.CLIENT_HASH_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.CLIENT_NAME_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.CLIENT_UID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.INSTANCE_CONFIG_ID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.LOCATION_ID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.PROJECT_ID_KEY; + +import com.google.auth.Credentials; +import com.google.cloud.opentelemetry.detection.AttributeKeys; +import com.google.cloud.opentelemetry.detection.DetectedPlatform; +import com.google.cloud.opentelemetry.detection.GCPPlatformDetector; +import com.google.common.hash.HashFunction; +import com.google.common.hash.Hashing; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.reflect.Method; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +final class BuiltInOpenTelemetryMetricsProvider { + + static BuiltInOpenTelemetryMetricsProvider INSTANCE = new BuiltInOpenTelemetryMetricsProvider(); + + private static final Logger logger = + Logger.getLogger(BuiltInOpenTelemetryMetricsProvider.class.getName()); + + private static String taskId; + + private OpenTelemetry openTelemetry; + + private BuiltInOpenTelemetryMetricsProvider() {} + + OpenTelemetry getOrCreateOpenTelemetry(String projectId, 
@Nullable Credentials credentials) { + try { + if (this.openTelemetry == null) { + SdkMeterProviderBuilder sdkMeterProviderBuilder = SdkMeterProvider.builder(); + BuiltInOpenTelemetryMetricsView.registerBuiltinMetrics( + SpannerCloudMonitoringExporter.create(projectId, credentials), sdkMeterProviderBuilder); + this.openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(sdkMeterProviderBuilder.build()).build(); + } + return this.openTelemetry; + } catch (IOException ex) { + logger.log( + Level.WARNING, + "Unable to get OpenTelemetry object for client side metrics, will skip exporting client side metrics", + ex); + return null; + } + } + + Map createClientAttributes(String projectId, String client_name) { + Map clientAttributes = new HashMap<>(); + clientAttributes.put(LOCATION_ID_KEY.getKey(), detectClientLocation()); + clientAttributes.put(PROJECT_ID_KEY.getKey(), projectId); + // TODO: Replace this with real value. + clientAttributes.put(INSTANCE_CONFIG_ID_KEY.getKey(), "unknown"); + clientAttributes.put(CLIENT_NAME_KEY.getKey(), client_name); + String clientUid = getDefaultTaskValue(); + clientAttributes.put(CLIENT_UID_KEY.getKey(), clientUid); + clientAttributes.put(CLIENT_HASH_KEY.getKey(), generateClientHash(clientUid)); + return clientAttributes; + } + + /** + * Generates a 6-digit zero-padded all lower case hexadecimal representation of hash of the + * accounting group. The hash utilizes the 10 most significant bits of the value returned by + * `Hashing.goodFastHash(64).hashBytes()`, so effectively the returned values are uniformly + * distributed in the range [000000, 0003ff]. + * + *

The primary purpose of this function is to generate a hash value for the `client_hash` + * resource label using `client_uid` metric field. The range of values is chosen to be small + * enough to keep the cardinality of the Resource targets under control. Note: If at later time + * the range needs to be increased, it can be done by increasing the value of `kPrefixLength` to + * up to 24 bits without changing the format of the returned value. + * + * @return Returns a 6-digit zero-padded all lower case hexadecimal representation of hash of the + * accounting group. + */ + static String generateClientHash(String clientUid) { + if (clientUid == null) { + return "000000"; + } + + HashFunction hashFunction = Hashing.goodFastHash(64); + Long hash = hashFunction.hashBytes(clientUid.getBytes()).asLong(); + // Don't change this value without reading above comment + int kPrefixLength = 10; + long shiftedValue = hash >>> (64 - kPrefixLength); + return String.format("%06x", shiftedValue); + } + + static String detectClientLocation() { + GCPPlatformDetector detector = GCPPlatformDetector.DEFAULT_INSTANCE; + DetectedPlatform detectedPlatform = detector.detectPlatform(); + // All platform except GKE uses "cloud_region" for region attribute. + String region = detectedPlatform.getAttributes().get("cloud_region"); + if (detectedPlatform.getSupportedPlatform() == GOOGLE_KUBERNETES_ENGINE) { + region = detectedPlatform.getAttributes().get(AttributeKeys.GKE_CLUSTER_LOCATION); + } + return region == null ? "global" : region; + } + + /** + * Generates a unique identifier for the Client_uid metric field. The identifier is composed of a + * UUID, the process ID (PID), and the hostname. + * + *

For Java 9 and later, the PID is obtained using the ProcessHandle API. For Java 8, the PID + * is extracted from ManagementFactory.getRuntimeMXBean().getName(). + * + * @return A unique identifier string in the format UUID@PID@hostname + */ + private static String getDefaultTaskValue() { + if (taskId == null) { + String identifier = UUID.randomUUID().toString(); + String pid = getProcessId(); + + try { + String hostname = InetAddress.getLocalHost().getHostName(); + taskId = identifier + "@" + pid + "@" + hostname; + } catch (UnknownHostException e) { + logger.log(Level.INFO, "Unable to get the hostname.", e); + taskId = identifier + "@" + pid + "@localhost"; + } + } + return taskId; + } + + private static String getProcessId() { + try { + // Check if Java 9+ and ProcessHandle class is available + Class processHandleClass = Class.forName("java.lang.ProcessHandle"); + Method currentMethod = processHandleClass.getMethod("current"); + Object processHandleInstance = currentMethod.invoke(null); + Method pidMethod = processHandleClass.getMethod("pid"); + long pid = (long) pidMethod.invoke(processHandleInstance); + return Long.toString(pid); + } catch (Exception e) { + // Fallback to Java 8 method + final String jvmName = ManagementFactory.getRuntimeMXBean().getName(); + if (jvmName != null && jvmName.contains("@")) { + return jvmName.split("@")[0]; + } else { + return "unknown"; + } + } + } +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInOpenTelemetryMetricsView.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInOpenTelemetryMetricsView.java new file mode 100644 index 00000000000..4a09c0d856a --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BuiltInOpenTelemetryMetricsView.java @@ -0,0 +1,33 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; + +class BuiltInOpenTelemetryMetricsView { + + private BuiltInOpenTelemetryMetricsView() {} + + /** Register built-in metrics on the {@link SdkMeterProviderBuilder} with credentials. */ + static void registerBuiltinMetrics( + MetricExporter metricExporter, SdkMeterProviderBuilder builder) { + BuiltInMetricsConstant.getAllViews().forEach(builder::registerView); + builder.registerMetricReader(PeriodicMetricReader.create(metricExporter)); + } +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracer.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracer.java new file mode 100644 index 00000000000..085a91fb88e --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracer.java @@ -0,0 +1,191 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.InternalApi; +import com.google.api.gax.tracing.ApiTracer; +import com.google.api.gax.tracing.BaseApiTracer; +import com.google.api.gax.tracing.MetricsTracer; +import com.google.common.collect.ImmutableList; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import org.threeten.bp.Duration; + +@InternalApi +public class CompositeTracer extends BaseApiTracer { + private final List children; + + public CompositeTracer(List children) { + this.children = ImmutableList.copyOf(children); + } + + @Override + public Scope inScope() { + final List childScopes = new ArrayList<>(children.size()); + + for (ApiTracer child : children) { + childScopes.add(child.inScope()); + } + + return new Scope() { + @Override + public void close() { + for (Scope childScope : childScopes) { + childScope.close(); + } + } + }; + } + + @Override + public void operationSucceeded() { + for (ApiTracer child : children) { + child.operationSucceeded(); + } + } + + @Override + public void operationCancelled() { + for (ApiTracer child : children) { + child.operationCancelled(); + } + } + + @Override + public void operationFailed(Throwable error) { + for (ApiTracer child : children) { + child.operationFailed(error); + } + } + + @Override + public void connectionSelected(String id) { + for (ApiTracer child : children) { + child.connectionSelected(id); + } + } + + @Override + public void attemptStarted(int attemptNumber) { + for (ApiTracer child : children) { + child.attemptStarted(null, attemptNumber); + } + } + + @Override + public void attemptStarted(Object request, int attemptNumber) { + for (ApiTracer child : children) { + child.attemptStarted(request, attemptNumber); + } + } + + @Override + public void attemptSucceeded() { + for (ApiTracer child : children) { + child.attemptSucceeded(); + } + } + + 
@Override + public void attemptCancelled() { + for (ApiTracer child : children) { + child.attemptCancelled(); + } + } + + @Override + public void attemptFailed(Throwable error, Duration delay) { + for (ApiTracer child : children) { + child.attemptFailed(error, delay); + } + } + + @Override + public void attemptFailedDuration(Throwable error, java.time.Duration delay) { + for (ApiTracer child : children) { + child.attemptFailedDuration(error, delay); + } + } + + @Override + public void attemptFailedRetriesExhausted(Throwable error) { + for (ApiTracer child : children) { + child.attemptFailedRetriesExhausted(error); + } + } + + @Override + public void attemptPermanentFailure(Throwable error) { + for (ApiTracer child : children) { + child.attemptPermanentFailure(error); + } + } + + @Override + public void lroStartFailed(Throwable error) { + for (ApiTracer child : children) { + child.lroStartFailed(error); + } + } + + @Override + public void lroStartSucceeded() { + for (ApiTracer child : children) { + child.lroStartSucceeded(); + } + } + + @Override + public void responseReceived() { + for (ApiTracer child : children) { + child.responseReceived(); + } + } + + @Override + public void requestSent() { + for (ApiTracer child : children) { + child.requestSent(); + } + } + + @Override + public void batchRequestSent(long elementCount, long requestSize) { + for (ApiTracer child : children) { + child.batchRequestSent(elementCount, requestSize); + } + } + + public void addAttributes(String key, String value) { + for (ApiTracer child : children) { + if (child instanceof MetricsTracer) { + MetricsTracer metricsTracer = (MetricsTracer) child; + metricsTracer.addAttributes(key, value); + } + } + } + + public void addAttributes(Map attributes) { + for (ApiTracer child : children) { + if (child instanceof MetricsTracer) { + MetricsTracer metricsTracer = (MetricsTracer) child; + attributes.forEach((key, value) -> metricsTracer.addAttributes(key, value)); + } + } + } +} diff --git 
a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracerFactory.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracerFactory.java new file mode 100644 index 00000000000..2e3965de095 --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/CompositeTracerFactory.java @@ -0,0 +1,46 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.InternalApi; +import com.google.api.gax.tracing.ApiTracer; +import com.google.api.gax.tracing.ApiTracerFactory; +import com.google.api.gax.tracing.BaseApiTracerFactory; +import com.google.api.gax.tracing.SpanName; +import com.google.common.collect.ImmutableList; +import java.util.ArrayList; +import java.util.List; + +@InternalApi +public class CompositeTracerFactory extends BaseApiTracerFactory { + + private final List apiTracerFactories; + + public CompositeTracerFactory(List apiTracerFactories) { + this.apiTracerFactories = ImmutableList.copyOf(apiTracerFactories); + } + + @Override + public ApiTracer newTracer(ApiTracer parent, SpanName spanName, OperationType operationType) { + List children = new ArrayList<>(apiTracerFactories.size()); + + for (ApiTracerFactory factory : apiTracerFactories) { + children.add(factory.newTracer(parent, spanName, operationType)); + } + return new CompositeTracer(children); + } +} diff --git 
a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseClientImpl.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseClientImpl.java index b2d6b19a528..909d731818f 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseClientImpl.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseClientImpl.java @@ -37,23 +37,37 @@ class DatabaseClientImpl implements DatabaseClient { @VisibleForTesting final SessionPool pool; @VisibleForTesting final MultiplexedSessionDatabaseClient multiplexedSessionDatabaseClient; + final boolean useMultiplexedSessionBlindWrite; + @VisibleForTesting DatabaseClientImpl(SessionPool pool, TraceWrapper tracer) { - this("", pool, /* multiplexedSessionDatabaseClient = */ null, tracer); + this( + "", + pool, + /* useMultiplexedSessionBlindWrite = */ false, + /* multiplexedSessionDatabaseClient = */ null, + tracer); } @VisibleForTesting DatabaseClientImpl(String clientId, SessionPool pool, TraceWrapper tracer) { - this(clientId, pool, /* multiplexedSessionDatabaseClient = */ null, tracer); + this( + clientId, + pool, + /* useMultiplexedSessionBlindWrite = */ false, + /* multiplexedSessionDatabaseClient = */ null, + tracer); } DatabaseClientImpl( String clientId, SessionPool pool, + boolean useMultiplexedSessionBlindWrite, @Nullable MultiplexedSessionDatabaseClient multiplexedSessionDatabaseClient, TraceWrapper tracer) { this.clientId = clientId; this.pool = pool; + this.useMultiplexedSessionBlindWrite = useMultiplexedSessionBlindWrite; this.multiplexedSessionDatabaseClient = multiplexedSessionDatabaseClient; this.tracer = tracer; } @@ -65,13 +79,21 @@ PooledSessionFuture getSession() { @VisibleForTesting DatabaseClient getMultiplexedSession() { - if (this.multiplexedSessionDatabaseClient != null - && this.multiplexedSessionDatabaseClient.isMultiplexedSessionsSupported()) { + if (canUseMultiplexedSessions()) { return this.multiplexedSessionDatabaseClient; } return 
pool.getMultiplexedSessionWithFallback(); } + private MultiplexedSessionDatabaseClient getMultiplexedSessionDatabaseClient() { + return canUseMultiplexedSessions() ? this.multiplexedSessionDatabaseClient : null; + } + + private boolean canUseMultiplexedSessions() { + return this.multiplexedSessionDatabaseClient != null + && this.multiplexedSessionDatabaseClient.isMultiplexedSessionsSupported(); + } + @Override public Dialect getDialect() { return pool.getDialect(); @@ -92,7 +114,7 @@ public Timestamp write(final Iterable mutations) throws SpannerExcepti public CommitResponse writeWithOptions( final Iterable mutations, final TransactionOption... options) throws SpannerException { - ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION); + ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION, options); try (IScope s = tracer.withSpan(span)) { return runWithSessionRetry(session -> session.writeWithOptions(mutations, options)); } catch (RuntimeException e) { @@ -112,8 +134,12 @@ public Timestamp writeAtLeastOnce(final Iterable mutations) throws Spa public CommitResponse writeAtLeastOnceWithOptions( final Iterable mutations, final TransactionOption... options) throws SpannerException { - ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION); + ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION, options); try (IScope s = tracer.withSpan(span)) { + if (useMultiplexedSessionBlindWrite && getMultiplexedSessionDatabaseClient() != null) { + return getMultiplexedSessionDatabaseClient() + .writeAtLeastOnceWithOptions(mutations, options); + } return runWithSessionRetry( session -> session.writeAtLeastOnceWithOptions(mutations, options)); } catch (RuntimeException e) { @@ -128,7 +154,7 @@ public CommitResponse writeAtLeastOnceWithOptions( public ServerStream batchWriteAtLeastOnce( final Iterable mutationGroups, final TransactionOption... 
options) throws SpannerException { - ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION); + ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION, options); try (IScope s = tracer.withSpan(span)) { return runWithSessionRetry(session -> session.batchWriteAtLeastOnce(mutationGroups, options)); } catch (RuntimeException e) { @@ -213,7 +239,7 @@ public ReadOnlyTransaction readOnlyTransaction(TimestampBound bound) { @Override public TransactionRunner readWriteTransaction(TransactionOption... options) { - ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION); + ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION, options); try (IScope s = tracer.withSpan(span)) { return getSession().readWriteTransaction(options); } catch (RuntimeException e) { @@ -225,7 +251,7 @@ public TransactionRunner readWriteTransaction(TransactionOption... options) { @Override public TransactionManager transactionManager(TransactionOption... options) { - ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION); + ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION, options); try (IScope s = tracer.withSpan(span)) { return getSession().transactionManager(options); } catch (RuntimeException e) { @@ -237,7 +263,7 @@ public TransactionManager transactionManager(TransactionOption... options) { @Override public AsyncRunner runAsync(TransactionOption... options) { - ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION); + ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION, options); try (IScope s = tracer.withSpan(span)) { return getSession().runAsync(options); } catch (RuntimeException e) { @@ -249,7 +275,7 @@ public AsyncRunner runAsync(TransactionOption... options) { @Override public AsyncTransactionManager transactionManagerAsync(TransactionOption... 
options) { - ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION); + ISpan span = tracer.spanBuilder(READ_WRITE_TRANSACTION, options); try (IScope s = tracer.withSpan(span)) { return getSession().transactionManagerAsync(options); } catch (RuntimeException e) { diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedMultiplexedSessionTransaction.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedMultiplexedSessionTransaction.java index 928927d49a0..36750eaccd1 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedMultiplexedSessionTransaction.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedMultiplexedSessionTransaction.java @@ -22,7 +22,9 @@ import com.google.api.core.ApiFutures; import com.google.cloud.spanner.DelayedReadContext.DelayedReadOnlyTransaction; import com.google.cloud.spanner.MultiplexedSessionDatabaseClient.MultiplexedSessionTransaction; +import com.google.cloud.spanner.Options.TransactionOption; import com.google.common.util.concurrent.MoreExecutors; +import java.util.concurrent.ExecutionException; /** * Represents a delayed execution of a transaction on a multiplexed session. The execution is @@ -119,4 +121,37 @@ public ReadOnlyTransaction readOnlyTransaction(TimestampBound bound) { .readOnlyTransaction(bound), MoreExecutors.directExecutor())); } + + /** + * This is a blocking method, as the interface that it implements is also defined as a blocking + * method. + */ + @Override + public CommitResponse writeAtLeastOnceWithOptions( + Iterable mutations, TransactionOption... 
options) throws SpannerException { + SessionReference sessionReference = getSessionReference(); + try (MultiplexedSessionTransaction transaction = + new MultiplexedSessionTransaction(client, span, sessionReference, NO_CHANNEL_HINT, true)) { + return transaction.writeAtLeastOnceWithOptions(mutations, options); + } + } + + /** + * Gets the session reference that this delayed transaction is waiting for. This method should + * only be called by methods that are allowed to be blocking. + */ + private SessionReference getSessionReference() { + try { + return this.sessionFuture.get(); + } catch (ExecutionException executionException) { + // Propagate the underlying exception as a RuntimeException (SpannerException is also a + // RuntimeException). + if (executionException.getCause() instanceof RuntimeException) { + throw (RuntimeException) executionException.getCause(); + } + throw SpannerExceptionFactory.asSpannerException(executionException.getCause()); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } + } } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedReadContext.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedReadContext.java index 752c41cdea3..62bc5711852 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedReadContext.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DelayedReadContext.java @@ -44,6 +44,11 @@ T getReadContext() { try { return this.readContextFuture.get(); } catch (ExecutionException executionException) { + // Propagate the underlying exception as a RuntimeException (SpannerException is also a + // RuntimeException). 
+ if (executionException.getCause() instanceof RuntimeException) { + throw (RuntimeException) executionException.getCause(); + } throw SpannerExceptionFactory.asSpannerException(executionException.getCause()); } catch (InterruptedException interruptedException) { throw SpannerExceptionFactory.propagateInterrupt(interruptedException); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ErrorHandler.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ErrorHandler.java new file mode 100644 index 00000000000..cf2465d7ade --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ErrorHandler.java @@ -0,0 +1,50 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.BetaApi; +import javax.annotation.Nonnull; + +/** + * The {@link ErrorHandler} interface can be used to implement custom error and retry handling for + * specific cases. The default implementation does nothing and falls back to the standard error and + * retry handling in Gax and the Spanner client. 
+ */ +@BetaApi +interface ErrorHandler { + @Nonnull + Throwable translateException(@Nonnull Throwable exception); + + int getMaxAttempts(); + + class DefaultErrorHandler implements ErrorHandler { + static final DefaultErrorHandler INSTANCE = new DefaultErrorHandler(); + + private DefaultErrorHandler() {} + + @Nonnull + @Override + public Throwable translateException(@Nonnull Throwable exception) { + return exception; + } + + @Override + public int getMaxAttempts() { + return 0; + } + } +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcResultSet.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcResultSet.java index 7b61901a60e..be75c1e5c4e 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcResultSet.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcResultSet.java @@ -25,6 +25,7 @@ import com.google.spanner.v1.ResultSetMetadata; import com.google.spanner.v1.ResultSetStats; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import javax.annotation.Nullable; @@ -35,6 +36,7 @@ class GrpcResultSet extends AbstractResultSet> implements ProtobufR private final DecodeMode decodeMode; private ResultSetMetadata metadata; private GrpcStruct currRow; + private List rowData; private SpannerException error; private ResultSetStats statistics; private boolean closed; @@ -85,7 +87,15 @@ public boolean next() throws SpannerException { throw SpannerExceptionFactory.newSpannerException( ErrorCode.FAILED_PRECONDITION, AbstractReadContext.NO_TRANSACTION_RETURNED_MSG); } - currRow = new GrpcStruct(iterator.type(), new ArrayList<>(), decodeMode); + if (rowData == null) { + rowData = new ArrayList<>(metadata.getRowType().getFieldsCount()); + if (decodeMode != DecodeMode.DIRECT) { + rowData = Collections.synchronizedList(rowData); + } + } else { + rowData.clear(); + } + currRow = new GrpcStruct(iterator.type(), rowData, decodeMode); } boolean hasNext = 
currRow.consumeRow(iterator); if (!hasNext) { diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcStreamIterator.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcStreamIterator.java index dde6b69c461..af6b5683502 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcStreamIterator.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcStreamIterator.java @@ -38,7 +38,7 @@ class GrpcStreamIterator extends AbstractIterator private static final Logger logger = Logger.getLogger(GrpcStreamIterator.class.getName()); private static final PartialResultSet END_OF_STREAM = PartialResultSet.newBuilder().build(); - private final ConsumerImpl consumer = new ConsumerImpl(); + private final ConsumerImpl consumer; private final BlockingQueue stream; private final Statement statement; @@ -49,13 +49,15 @@ class GrpcStreamIterator extends AbstractIterator private SpannerException error; @VisibleForTesting - GrpcStreamIterator(int prefetchChunks) { - this(null, prefetchChunks); + GrpcStreamIterator(int prefetchChunks, boolean cancelQueryWhenClientIsClosed) { + this(null, prefetchChunks, cancelQueryWhenClientIsClosed); } @VisibleForTesting - GrpcStreamIterator(Statement statement, int prefetchChunks) { + GrpcStreamIterator( + Statement statement, int prefetchChunks, boolean cancelQueryWhenClientIsClosed) { this.statement = statement; + this.consumer = new ConsumerImpl(cancelQueryWhenClientIsClosed); // One extra to allow for END_OF_STREAM message. 
this.stream = new LinkedBlockingQueue<>(prefetchChunks + 1); } @@ -136,6 +138,12 @@ private void addToStream(PartialResultSet results) { } private class ConsumerImpl implements SpannerRpc.ResultStreamConsumer { + private final boolean cancelQueryWhenClientIsClosed; + + ConsumerImpl(boolean cancelQueryWhenClientIsClosed) { + this.cancelQueryWhenClientIsClosed = cancelQueryWhenClientIsClosed; + } + @Override public void onPartialResultSet(PartialResultSet results) { addToStream(results); @@ -168,5 +176,10 @@ public void onError(SpannerException e) { error = e; addToStream(END_OF_STREAM); } + + @Override + public boolean cancelQueryWhenClientIsClosed() { + return this.cancelQueryWhenClientIsClosed; + } } } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcStruct.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcStruct.java index 852b9ed61a3..4d07a12880c 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcStruct.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/GrpcStruct.java @@ -49,6 +49,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import java.util.stream.Collectors; @@ -60,7 +61,7 @@ class GrpcStruct extends Struct implements Serializable { private final List rowData; private final DecodeMode decodeMode; private final BitSet colDecoded; - private boolean rowDecoded; + private final AtomicBoolean rowDecoded; /** * Builds an immutable version of this struct using {@link Struct#newBuilder()} which is used as a @@ -224,7 +225,7 @@ private GrpcStruct( this.type = type; this.rowData = rowData; this.decodeMode = decodeMode; - this.rowDecoded = rowDecoded; + this.rowDecoded = new AtomicBoolean(rowDecoded); this.colDecoded = colDecoded; } @@ -234,29 +235,31 @@ public String toString() { } boolean consumeRow(Iterator iterator) { - rowData.clear(); - if 
(decodeMode == DecodeMode.LAZY_PER_ROW) { - rowDecoded = false; - } else if (decodeMode == DecodeMode.LAZY_PER_COL) { - colDecoded.clear(); - } - if (!iterator.hasNext()) { - return false; - } - for (Type.StructField fieldType : getType().getStructFields()) { + synchronized (rowData) { + rowData.clear(); + if (decodeMode == DecodeMode.LAZY_PER_ROW) { + rowDecoded.set(false); + } else if (decodeMode == DecodeMode.LAZY_PER_COL) { + colDecoded.clear(); + } if (!iterator.hasNext()) { - throw newSpannerException( - ErrorCode.INTERNAL, - "Invalid value stream: end of stream reached before row is complete"); + return false; } - com.google.protobuf.Value value = iterator.next(); - if (decodeMode == DecodeMode.DIRECT) { - rowData.add(decodeValue(fieldType.getType(), value)); - } else { - rowData.add(value); + for (Type.StructField fieldType : getType().getStructFields()) { + if (!iterator.hasNext()) { + throw newSpannerException( + ErrorCode.INTERNAL, + "Invalid value stream: end of stream reached before row is complete"); + } + com.google.protobuf.Value value = iterator.next(); + if (decodeMode == DecodeMode.DIRECT) { + rowData.add(decodeValue(fieldType.getType(), value)); + } else { + rowData.add(value); + } } + return true; } - return true; } private static Object decodeValue(Type fieldType, com.google.protobuf.Value proto) { @@ -367,12 +370,16 @@ private static void checkType( } Struct immutableCopy() { - return new GrpcStruct( - type, - new ArrayList<>(rowData), - this.decodeMode, - this.rowDecoded, - this.colDecoded == null ? null : (BitSet) this.colDecoded.clone()); + synchronized (rowData) { + return new GrpcStruct( + type, + this.decodeMode == DecodeMode.DIRECT + ? new ArrayList<>(rowData) + : Collections.synchronizedList(new ArrayList<>(rowData)), + this.decodeMode, + this.rowDecoded.get(), + this.colDecoded == null ? 
null : (BitSet) this.colDecoded.clone()); + } } @Override @@ -382,9 +389,14 @@ public Type getType() { @Override public boolean isNull(int columnIndex) { - if ((decodeMode == DecodeMode.LAZY_PER_ROW && !rowDecoded) - || (decodeMode == DecodeMode.LAZY_PER_COL && !colDecoded.get(columnIndex))) { - return ((com.google.protobuf.Value) rowData.get(columnIndex)).hasNullValue(); + if (decodeMode == DecodeMode.LAZY_PER_ROW || decodeMode == DecodeMode.LAZY_PER_COL) { + synchronized (rowData) { + if ((decodeMode == DecodeMode.LAZY_PER_ROW && !rowDecoded.get()) + || (decodeMode == DecodeMode.LAZY_PER_COL && !colDecoded.get(columnIndex))) { + return ((com.google.protobuf.Value) rowData.get(columnIndex)).hasNullValue(); + } + return rowData.get(columnIndex) == null; + } } return rowData.get(columnIndex) == null; } @@ -496,14 +508,18 @@ private boolean isUnrecognizedType(int columnIndex) { } boolean canGetProtoValue(int columnIndex) { - return isUnrecognizedType(columnIndex) - || (decodeMode == DecodeMode.LAZY_PER_ROW && !rowDecoded) - || (decodeMode == DecodeMode.LAZY_PER_COL && !colDecoded.get(columnIndex)); + synchronized (rowData) { + return isUnrecognizedType(columnIndex) + || (decodeMode == DecodeMode.LAZY_PER_ROW && !rowDecoded.get()) + || (decodeMode == DecodeMode.LAZY_PER_COL && !colDecoded.get(columnIndex)); + } } protected com.google.protobuf.Value getProtoValueInternal(int columnIndex) { - checkProtoValueSupported(columnIndex); - return (com.google.protobuf.Value) rowData.get(columnIndex); + synchronized (rowData) { + checkProtoValueSupported(columnIndex); + return (com.google.protobuf.Value) rowData.get(columnIndex); + } } private void checkProtoValueSupported(int columnIndex) { @@ -515,7 +531,7 @@ private void checkProtoValueSupported(int columnIndex) { decodeMode != DecodeMode.DIRECT, "Getting proto value is not supported when DecodeMode#DIRECT is used."); Preconditions.checkState( - !(decodeMode == DecodeMode.LAZY_PER_ROW && rowDecoded), + !(decodeMode == 
DecodeMode.LAZY_PER_ROW && rowDecoded.get()), "Getting proto value after the row has been decoded is not supported."); Preconditions.checkState( !(decodeMode == DecodeMode.LAZY_PER_COL && colDecoded.get(columnIndex)), @@ -523,22 +539,48 @@ private void checkProtoValueSupported(int columnIndex) { } private void ensureDecoded(int columnIndex) { - if (decodeMode == DecodeMode.LAZY_PER_ROW && !rowDecoded) { - for (int i = 0; i < rowData.size(); i++) { - rowData.set( - i, - decodeValue( - type.getStructFields().get(i).getType(), - (com.google.protobuf.Value) rowData.get(i))); + if (decodeMode == DecodeMode.LAZY_PER_ROW) { + synchronized (rowData) { + if (!rowDecoded.get()) { + for (int i = 0; i < rowData.size(); i++) { + rowData.set( + i, + decodeValue( + type.getStructFields().get(i).getType(), + (com.google.protobuf.Value) rowData.get(i))); + } + } + rowDecoded.set(true); + } + } else if (decodeMode == DecodeMode.LAZY_PER_COL) { + boolean decoded; + Object value; + synchronized (rowData) { + decoded = colDecoded.get(columnIndex); + value = rowData.get(columnIndex); + } + if (!decoded) { + // Use the column as a lock during decoding to ensure that we decode once (mostly), but also + // that multiple different columns can be decoded in parallel if requested. + synchronized (type.getStructFields().get(columnIndex)) { + // Note: It can be that we decode the value twice if two threads request this at the same + // time, but the synchronization on rowData above and below makes sure that we always get + // and set a consistent value (and only set it once). 
+ if (!colDecoded.get(columnIndex)) { + value = + decodeValue( + type.getStructFields().get(columnIndex).getType(), + (com.google.protobuf.Value) value); + decoded = true; + } + } + if (decoded) { + synchronized (rowData) { + rowData.set(columnIndex, value); + colDecoded.set(columnIndex); + } + } } - rowDecoded = true; - } else if (decodeMode == DecodeMode.LAZY_PER_COL && !colDecoded.get(columnIndex)) { - rowData.set( - columnIndex, - decodeValue( - type.getStructFields().get(columnIndex).getType(), - (com.google.protobuf.Value) rowData.get(columnIndex))); - colDecoded.set(columnIndex); } } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceInfo.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceInfo.java index ebe0514adf2..e0381952989 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceInfo.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceInfo.java @@ -24,6 +24,7 @@ import com.google.common.collect.ImmutableMap; import com.google.protobuf.FieldMask; import com.google.spanner.admin.instance.v1.AutoscalingConfig; +import com.google.spanner.admin.instance.v1.Instance.Edition; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -37,6 +38,7 @@ public enum InstanceField implements FieldSelector { NODE_COUNT("node_count"), PROCESSING_UNITS("processing_units"), AUTOSCALING_CONFIG("autoscaling_config"), + EDITION("edition"), LABELS("labels"); static InstanceField[] defaultFieldsToUpdate(InstanceInfo info) { @@ -116,6 +118,10 @@ public Builder setAutoscalingConfig(AutoscalingConfig autoscalingConfig) { throw new UnsupportedOperationException("Unimplemented"); } + public Builder setEdition(Edition edition) { + throw new UnsupportedOperationException("Unimplemented"); + } + public abstract Builder setState(State state); public abstract Builder addLabel(String key, String value); @@ -132,6 +138,7 @@ static class BuilderImpl extends Builder { 
private int nodeCount; private int processingUnits; private AutoscalingConfig autoscalingConfig; + private Edition edition; private State state; private Map labels; private Timestamp updateTime; @@ -153,6 +160,7 @@ static class BuilderImpl extends Builder { this.labels = new HashMap<>(instance.labels); this.updateTime = instance.updateTime; this.createTime = instance.createTime; + this.edition = instance.edition; } @Override @@ -197,6 +205,12 @@ public BuilderImpl setAutoscalingConfig(AutoscalingConfig autoscalingConfig) { return this; } + @Override + public BuilderImpl setEdition(Edition edition) { + this.edition = edition; + return this; + } + @Override public BuilderImpl setState(State state) { this.state = state; @@ -227,6 +241,7 @@ public InstanceInfo build() { private final int nodeCount; private final int processingUnits; private final AutoscalingConfig autoscalingConfig; + private final Edition edition; private final State state; private final ImmutableMap labels; private final Timestamp updateTime; @@ -239,6 +254,7 @@ public InstanceInfo build() { this.nodeCount = builder.nodeCount; this.processingUnits = builder.processingUnits; this.autoscalingConfig = builder.autoscalingConfig; + this.edition = builder.edition; this.state = builder.state; this.labels = ImmutableMap.copyOf(builder.labels); this.updateTime = builder.updateTime; @@ -283,6 +299,10 @@ public AutoscalingConfig getAutoscalingConfig() { return autoscalingConfig; } + public Edition getEdition() { + return edition; + } + /** Returns the current state of the instance. 
*/ public State getState() { return state; @@ -306,6 +326,7 @@ public String toString() { .add("nodeCount", nodeCount) .add("processingUnits", processingUnits) .add("autoscaling_config", autoscalingConfig) + .add("edition", edition) .add("state", state) .add("labels", labels) .add("createTime", createTime) @@ -328,6 +349,7 @@ public boolean equals(Object o) { && nodeCount == that.nodeCount && processingUnits == that.processingUnits && Objects.equals(autoscalingConfig, that.autoscalingConfig) + && edition == that.edition && state == that.state && Objects.equals(labels, that.labels) && Objects.equals(updateTime, that.updateTime) @@ -343,6 +365,7 @@ public int hashCode() { nodeCount, processingUnits, autoscalingConfig, + edition, state, labels, updateTime, @@ -365,6 +388,9 @@ com.google.spanner.admin.instance.v1.Instance toProto() { if (getAutoscalingConfig() != null) { builder.setAutoscalingConfig(getAutoscalingConfig()); } + if (getEdition() != null) { + builder.setEdition(getEdition()); + } return builder.build(); } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/IsChannelShutdownException.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/IsChannelShutdownException.java new file mode 100644 index 00000000000..367d75a13cb --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/IsChannelShutdownException.java @@ -0,0 +1,50 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.rpc.UnavailableException; +import com.google.common.base.Predicate; +import io.grpc.Status.Code; +import io.grpc.StatusRuntimeException; + +/** + * Predicate that checks whether an exception is a ChannelShutdownException. This exception is + * thrown by gRPC if the underlying gRPC stub has been shut down and uses the UNAVAILABLE error + * code. This means that it would normally be retried by the Spanner client, but this specific + * UNAVAILABLE error should not be retried, as it would otherwise directly return the same error. + */ +class IsChannelShutdownException implements Predicate { + + @Override + public boolean apply(Throwable input) { + Throwable cause = input; + do { + if (isUnavailableError(cause) + && (cause.getMessage().contains("Channel shutdown invoked") + || cause.getMessage().contains("Channel shutdownNow invoked"))) { + return true; + } + } while ((cause = cause.getCause()) != null); + return false; + } + + private boolean isUnavailableError(Throwable cause) { + return (cause instanceof UnavailableException) + || (cause instanceof StatusRuntimeException + && ((StatusRuntimeException) cause).getStatus().getCode() == Code.UNAVAILABLE); + } +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/LatencyTest.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/LatencyTest.java new file mode 100644 index 00000000000..4f70c32d2b4 --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/LatencyTest.java @@ -0,0 +1,74 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.spanner; + +import com.google.auth.oauth2.GoogleCredentials; +import com.google.cloud.spanner.SpannerOptions.FixedCloseableExecutorProvider; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadLocalRandom; +import org.threeten.bp.Duration; + +public class LatencyTest { + + public static void main(String[] args) throws Exception { + ThreadFactory threadFactory = + ThreadFactoryUtil.tryCreateVirtualThreadFactory("spanner-async-worker"); + if (threadFactory == null) { + return; + } + ScheduledExecutorService service = Executors.newScheduledThreadPool(0, threadFactory); + Spanner spanner = + SpannerOptions.newBuilder() + .setCredentials( + GoogleCredentials.fromStream( + Files.newInputStream( + Paths.get("/Users/loite/Downloads/appdev-soda-spanner-staging.json")))) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessions(Duration.ofSeconds(5L)) + // .setUseMultiplexedSession(true) + .build()) + .setUseVirtualThreads(true) + .setAsyncExecutorProvider(FixedCloseableExecutorProvider.create(service)) + .build() + .getService(); + DatabaseClient client = + spanner.getDatabaseClient( + DatabaseId.of("appdev-soda-spanner-staging", "knut-test-ycsb", "latencytest")); + for (int i = 0; i < 1000000; i++) { + try (AsyncResultSet resultSet = + client + .singleUse() + .executeQueryAsync( + Statement.newBuilder("select col_varchar 
from latency_test where col_bigint=$1") + .bind("p1") + .to(ThreadLocalRandom.current().nextLong(100000L)) + .build())) { + while (resultSet.next()) { + for (int col = 0; col < resultSet.getColumnCount(); col++) { + if (resultSet.getValue(col) == null) { + throw new IllegalStateException(); + } + } + } + } + } + } +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClient.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClient.java index e742481be2c..81415e80d25 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClient.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClient.java @@ -21,6 +21,7 @@ import com.google.api.core.ApiFuture; import com.google.api.core.ApiFutures; import com.google.api.core.SettableApiFuture; +import com.google.cloud.spanner.Options.TransactionOption; import com.google.cloud.spanner.SessionClient.SessionConsumer; import com.google.cloud.spanner.SpannerException.ResourceNotFoundException; import com.google.common.annotations.VisibleForTesting; @@ -107,6 +108,14 @@ void onReadDone() { } } + @Override + public CommitResponse writeAtLeastOnceWithOptions( + Iterable mutations, TransactionOption... options) throws SpannerException { + CommitResponse response = super.writeAtLeastOnceWithOptions(mutations, options); + onTransactionDone(); + return response; + } + @Override void onTransactionDone() { boolean markedDone = false; @@ -358,6 +367,13 @@ private int getSingleUseChannelHint() { } } + @Override + public CommitResponse writeAtLeastOnceWithOptions( + Iterable mutations, TransactionOption... 
options) throws SpannerException { + return createMultiplexedSessionTransaction(true) + .writeAtLeastOnceWithOptions(mutations, options); + } + @Override public ReadContext singleUse() { return createMultiplexedSessionTransaction(true).singleUse(); @@ -390,11 +406,14 @@ public ReadOnlyTransaction readOnlyTransaction(TimestampBound bound) { /** * It is enough with one executor to maintain the multiplexed sessions in all the clients, as they - * do not need to be updated often, and the maintenance task is light. + * do not need to be updated often, and the maintenance task is light. The core pool size is set + * to 1 to prevent continuous creating and tearing down threads, and to avoid high CPU usage when + * running on Java 8 due to + * https://bugs.openjdk.org/browse/JDK-8129861. */ private static final ScheduledExecutorService MAINTAINER_SERVICE = Executors.newScheduledThreadPool( - /* corePoolSize = */ 0, + /* corePoolSize = */ 1, ThreadFactoryUtil.createVirtualOrPlatformDaemonThreadFactory( "multiplexed-session-maintainer", /* tryVirtual = */ false)); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryApiTracer.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryApiTracer.java new file mode 100644 index 00000000000..8d28a4b01ce --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryApiTracer.java @@ -0,0 +1,274 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.tracing.ApiTracer; +import com.google.api.gax.tracing.ApiTracerFactory.OperationType; +import com.google.common.base.Preconditions; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.StatusCode; +import java.util.concurrent.atomic.AtomicLong; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import org.threeten.bp.Duration; + +/** + * {@link com.google.api.gax.tracing.ApiTracer} for use with OpenTelemetry. Based on {@link + * com.google.api.gax.tracing.OpencensusTracer}. + */ +class OpenTelemetryApiTracer implements ApiTracer { + /** The attribute keys that are used by this tracer might change in a future release. */ + private final AttributeKey ATTEMPT_COUNT_KEY = AttributeKey.longKey("attempt.count"); + + private final AttributeKey TOTAL_REQUEST_COUNT_KEY = + AttributeKey.longKey("total_request_count"); + private final AttributeKey TOTAL_RESPONSE_COUNT_KEY = + AttributeKey.longKey("total_response_count"); + private final AttributeKey EXCEPTION_MESSAGE_KEY = + AttributeKey.stringKey("exception.message"); + private final AttributeKey ATTEMPT_NUMBER_KEY = AttributeKey.longKey("attempt.number"); + private final AttributeKey ATTEMPT_REQUEST_COUNT_KEY = + AttributeKey.longKey("attempt.request_count"); + private final AttributeKey ATTEMPT_RESPONSE_COUNT_KEY = + AttributeKey.longKey("attempt.response_count"); + private final AttributeKey CONNECTION_ID_KEY = AttributeKey.stringKey("connection"); + private final AttributeKey RETRY_DELAY_KEY = AttributeKey.longKey("delay_ms"); + private static final AttributeKey BATCH_SIZE_KEY = AttributeKey.longKey("batch.size"); + private static final AttributeKey 
BATCH_COUNT_KEY = AttributeKey.longKey("batch.count"); + + private final Span span; + private final OperationType operationType; + + private volatile String lastConnectionId; + private volatile long currentAttemptId; + private final AtomicLong attemptSentMessages = new AtomicLong(0); + private long attemptReceivedMessages = 0; + private final AtomicLong totalSentMessages = new AtomicLong(0); + private long totalReceivedMessages = 0; + + OpenTelemetryApiTracer(@Nonnull Span span, @Nonnull OperationType operationType) { + this.span = Preconditions.checkNotNull(span); + this.operationType = Preconditions.checkNotNull(operationType); + } + + Span getSpan() { + return this.span; + } + + @Override + public Scope inScope() { + final io.opentelemetry.context.Scope openTelemetryScope = span.makeCurrent(); + return openTelemetryScope::close; + } + + @Override + public void operationSucceeded() { + span.setAllAttributes(baseOperationAttributes()); + span.setStatus(StatusCode.OK); + span.end(); + } + + @Override + public void operationCancelled() { + span.setAllAttributes(baseOperationAttributes()); + span.setStatus(StatusCode.ERROR, "Cancelled by caller"); + span.end(); + } + + @Override + public void operationFailed(Throwable error) { + span.setAllAttributes(baseOperationAttributes()); + span.setStatus(StatusCode.ERROR, error.getMessage()); + span.end(); + } + + @Override + public void lroStartFailed(Throwable error) { + span.addEvent( + "Operation failed to start", Attributes.of(EXCEPTION_MESSAGE_KEY, error.getMessage())); + span.setStatus(StatusCode.ERROR, error.getMessage()); + span.end(); + } + + @Override + public void lroStartSucceeded() { + span.addEvent("Operation started"); + } + + @Override + public void connectionSelected(String id) { + lastConnectionId = id; + } + + @Override + public void attemptStarted(int attemptNumber) { + attemptStarted(null, attemptNumber); + } + + @Override + public void attemptStarted(@Nullable Object request, int attemptNumber) { + 
currentAttemptId = attemptNumber; + attemptSentMessages.set(0); + attemptReceivedMessages = 0; + + // Attempts start counting at zero, so more than zero indicates a retry. + if (attemptNumber > 0 && operationType != OperationType.LongRunning) { + // Add an event if the RPC retries, as this is otherwise transparent to the user. Retries + // would then show up as higher latency without any logical explanation. + span.addEvent("Starting RPC retry " + attemptNumber); + } else if (operationType == OperationType.LongRunning) { + span.addEvent("Starting poll attempt " + attemptNumber); + } + } + + @Override + public void attemptSucceeded() { + Attributes attributes = baseAttemptAttributes(); + + // Same infrastructure is used for both polling and retries, so need to disambiguate it here. + if (operationType == OperationType.LongRunning) { + span.addEvent("Polling completed", attributes); + } else { + span.addEvent("Attempt succeeded", attributes); + } + } + + @Override + public void attemptCancelled() { + Attributes attributes = baseAttemptAttributes(); + + // Same infrastructure is used for both polling and retries, so need to disambiguate it here. + if (operationType == OperationType.LongRunning) { + span.addEvent("Polling was cancelled", attributes); + } else { + span.addEvent("Attempt cancelled", attributes); + } + lastConnectionId = null; + } + + @Override + public void attemptFailed(Throwable error, Duration delay) { + AttributesBuilder builder = baseAttemptAttributesBuilder(); + if (delay != null) { + builder.put(RETRY_DELAY_KEY, delay.toMillis()); + } + if (error != null) { + builder.put(EXCEPTION_MESSAGE_KEY, error.getMessage()); + } + Attributes attributes = builder.build(); + + // Same infrastructure is used for both polling and retries, so need to disambiguate it here. + if (operationType == OperationType.LongRunning) { + // The poll RPC was successful, but it indicated that the operation is still running.
+ span.addEvent("Scheduling next poll", attributes); + } else { + span.addEvent("Attempt failed, scheduling next attempt", attributes); + } + lastConnectionId = null; + } + + @Override + public void attemptFailedRetriesExhausted(@Nonnull Throwable error) { + AttributesBuilder builder = baseAttemptAttributesBuilder(); + builder.put(EXCEPTION_MESSAGE_KEY, error.getMessage()); + Attributes attributes = builder.build(); + + // Same infrastructure is used for both polling and retries, so need to disambiguate it here. + if (operationType == OperationType.LongRunning) { + span.addEvent("Polling attempts exhausted", attributes); + } else { + span.addEvent("Attempts exhausted", attributes); + } + lastConnectionId = null; + } + + @Override + public void attemptPermanentFailure(@Nonnull Throwable error) { + AttributesBuilder builder = baseAttemptAttributesBuilder(); + builder.put(EXCEPTION_MESSAGE_KEY, error.getMessage()); + Attributes attributes = builder.build(); + + // Same infrastructure is used for both polling and retries, so need to disambiguate it here. 
+ if (operationType == OperationType.LongRunning) { + span.addEvent("Polling failed", attributes); + } else { + span.addEvent("Attempt failed, error not retryable", attributes); + } + lastConnectionId = null; + } + + @Override + public void responseReceived() { + attemptReceivedMessages++; + totalReceivedMessages++; + } + + @Override + public void requestSent() { + attemptSentMessages.incrementAndGet(); + totalSentMessages.incrementAndGet(); + } + + @Override + public void batchRequestSent(long elementCount, long requestSize) { + span.setAllAttributes( + Attributes.of(BATCH_COUNT_KEY, elementCount, BATCH_SIZE_KEY, requestSize)); + } + + private Attributes baseOperationAttributes() { + AttributesBuilder builder = Attributes.builder(); + builder.put(ATTEMPT_COUNT_KEY, currentAttemptId + 1); + long localTotalSentMessages = totalSentMessages.get(); + if (localTotalSentMessages > 0) { + builder.put(TOTAL_REQUEST_COUNT_KEY, localTotalSentMessages); + } + if (totalReceivedMessages > 0) { + builder.put(TOTAL_RESPONSE_COUNT_KEY, totalReceivedMessages); + } + return builder.build(); + } + + private Attributes baseAttemptAttributes() { + return baseAttemptAttributesBuilder().build(); + } + + private AttributesBuilder baseAttemptAttributesBuilder() { + AttributesBuilder builder = Attributes.builder(); + populateAttemptNumber(builder); + + long localAttemptSentMessages = attemptSentMessages.get(); + if (localAttemptSentMessages > 0) { + builder.put(ATTEMPT_REQUEST_COUNT_KEY, localAttemptSentMessages); + } + if (attemptReceivedMessages > 0) { + builder.put(ATTEMPT_RESPONSE_COUNT_KEY, attemptReceivedMessages); + } + String localLastConnectionId = lastConnectionId; + if (localLastConnectionId != null) { + builder.put(CONNECTION_ID_KEY, localLastConnectionId); + } + + return builder; + } + + private void populateAttemptNumber(AttributesBuilder builder) { + builder.put(ATTEMPT_NUMBER_KEY, currentAttemptId); + } +} diff --git 
a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryApiTracerFactory.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryApiTracerFactory.java new file mode 100644 index 00000000000..7c66c3239e2 --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryApiTracerFactory.java @@ -0,0 +1,60 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.gax.tracing.ApiTracer; +import com.google.api.gax.tracing.ApiTracerFactory; +import com.google.api.gax.tracing.SpanName; +import com.google.common.base.Preconditions; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Context; +import javax.annotation.Nonnull; + +/** {@link ApiTracerFactory} that can be used with OpenTelemetry tracing. 
*/ +class OpenTelemetryApiTracerFactory implements ApiTracerFactory { + @Nonnull private final Tracer internalTracer; + @Nonnull private final Attributes spanAttributes; + + OpenTelemetryApiTracerFactory( + @Nonnull Tracer internalTracer, @Nonnull Attributes spanAttributes) { + this.internalTracer = Preconditions.checkNotNull(internalTracer); + this.spanAttributes = spanAttributes; + } + + @Override + public ApiTracer newTracer(ApiTracer parent, SpanName spanName, OperationType operationType) { + // Default to the current in context span. This is used for outermost tracers that inherit + // the caller's parent span. + Span parentSpan = Span.current(); + + // If an outer callable started a span, use it as the parent. + if (parent instanceof OpenTelemetryApiTracer) { + parentSpan = ((OpenTelemetryApiTracer) parent).getSpan(); + } + + Span span = + internalTracer + .spanBuilder(spanName.toString()) + .setParent(Context.current().with(parentSpan)) + .setAllAttributes(spanAttributes) + .startSpan(); + + return new OpenTelemetryApiTracer(span, operationType); + } +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryContextKeys.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryContextKeys.java new file mode 100644 index 00000000000..e5fbedb7c37 --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/OpenTelemetryContextKeys.java @@ -0,0 +1,30 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.InternalApi; +import io.opentelemetry.context.ContextKey; + +/** + * Keys for OpenTelemetry context variables that are used by the Spanner client library. Only + * intended for internal use. + */ +@InternalApi +public class OpenTelemetryContextKeys { + @InternalApi + public static final ContextKey<String> THREAD_NAME_KEY = ContextKey.named("thread.name"); +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Options.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Options.java index 3dbd0c1cda3..9c3257586fb 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Options.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Options.java @@ -18,6 +18,7 @@ import com.google.common.base.Preconditions; import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.ReadRequest.OrderBy; import com.google.spanner.v1.RequestOptions.Priority; import java.io.Serializable; import java.time.Duration; @@ -51,6 +52,29 @@ public static RpcPriority fromProto(Priority proto) { } } + + /** + * OrderBy for an RPC invocation. The default order by is {@link #PRIMARY_KEY}. This enum can be + * used to control the order in which rows are returned from a read.
+ */ + public enum RpcOrderBy { + PRIMARY_KEY(OrderBy.ORDER_BY_PRIMARY_KEY), + NO_ORDER(OrderBy.ORDER_BY_NO_ORDER), + UNSPECIFIED(OrderBy.ORDER_BY_UNSPECIFIED); + + private final OrderBy proto; + + RpcOrderBy(OrderBy proto) { + this.proto = Preconditions.checkNotNull(proto); + } + + public static RpcOrderBy fromProto(OrderBy proto) { + for (RpcOrderBy e : RpcOrderBy.values()) { + if (e.proto.equals(proto)) return e; + } + return RpcOrderBy.UNSPECIFIED; + } + } + /** Marker interface to mark options applicable to both Read and Query operations */ public interface ReadAndQueryOption extends ReadOption, QueryOption {} @@ -131,6 +155,11 @@ public static ReadOption limit(long limit) { return new LimitOption(limit); } + /** Specifies the order_by to use for the RPC. */ + public static ReadOption orderBy(RpcOrderBy orderBy) { + return new OrderByOption(orderBy); + } + /** * Specifying this will allow the client to prefetch up to {@code prefetchChunks} {@code * PartialResultSet} chunks for read and query. The data size of each chunk depends on the server @@ -355,6 +384,10 @@ static final class TagOption extends InternalOption implements ReadQueryUpdateTr this.tag = tag; } + String getTag() { + return tag; + } + @Override void appendToOptions(Options options) { options.tag = tag; @@ -435,6 +468,7 @@ void appendToOptions(Options options) { private Boolean dataBoostEnabled; private DirectedReadOptions directedReadOptions; private DecodeMode decodeMode; + private RpcOrderBy orderBy; // Construction is via factory methods below. private Options() {} @@ -563,6 +597,14 @@ DecodeMode decodeMode() { return decodeMode; } + boolean hasOrderBy() { + return orderBy != null; + } + + OrderBy orderBy() { + return orderBy == null ? 
null : orderBy.proto; + } + @Override public String toString() { StringBuilder b = new StringBuilder(); @@ -616,6 +658,9 @@ public String toString() { if (decodeMode != null) { b.append("decodeMode: ").append(decodeMode).append(' '); } + if (orderBy != null) { + b.append("orderBy: ").append(orderBy).append(' '); + } return b.toString(); } @@ -654,7 +699,8 @@ public boolean equals(Object o) { && Objects.equals(withOptimisticLock(), that.withOptimisticLock()) && Objects.equals(withExcludeTxnFromChangeStreams(), that.withExcludeTxnFromChangeStreams()) && Objects.equals(dataBoostEnabled(), that.dataBoostEnabled()) - && Objects.equals(directedReadOptions(), that.directedReadOptions()); + && Objects.equals(directedReadOptions(), that.directedReadOptions()) + && Objects.equals(orderBy(), that.orderBy()); } @Override @@ -711,6 +757,9 @@ public int hashCode() { if (decodeMode != null) { result = 31 * result + decodeMode.hashCode(); } + if (orderBy != null) { + result = 31 * result + orderBy.hashCode(); + } return result; } @@ -791,6 +840,19 @@ void appendToOptions(Options options) { } } + static class OrderByOption extends InternalOption implements ReadOption { + private final RpcOrderBy orderBy; + + OrderByOption(RpcOrderBy orderBy) { + this.orderBy = orderBy; + } + + @Override + void appendToOptions(Options options) { + options.orderBy = orderBy; + } + } + static final class DataBoostQueryOption extends InternalOption implements ReadAndQueryOption { private final Boolean dataBoostEnabled; diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/PartitionedDmlTransaction.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/PartitionedDmlTransaction.java index 949265ea28a..82b7f06b7d2 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/PartitionedDmlTransaction.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/PartitionedDmlTransaction.java @@ -221,14 +221,6 @@ private ByteString initTransaction(final 
Options options) { private void setParameters( final ExecuteSqlRequest.Builder requestBuilder, final Map statementParameters) { - if (!statementParameters.isEmpty()) { - com.google.protobuf.Struct.Builder paramsBuilder = requestBuilder.getParamsBuilder(); - for (Map.Entry param : statementParameters.entrySet()) { - paramsBuilder.putFields(param.getKey(), Value.toProto(param.getValue())); - if (param.getValue() != null && param.getValue().getType() != null) { - requestBuilder.putParamTypes(param.getKey(), param.getValue().getType().toProto()); - } - } - } + AbstractReadContext.addParameters(requestBuilder, statementParameters); } } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ResumableStreamIterator.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ResumableStreamIterator.java index 590797c0999..3e82ab7d5ff 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ResumableStreamIterator.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/ResumableStreamIterator.java @@ -34,6 +34,7 @@ import com.google.protobuf.ByteString; import com.google.spanner.v1.PartialResultSet; import io.grpc.Context; +import io.opentelemetry.api.common.Attributes; import java.io.IOException; import java.util.LinkedList; import java.util.Objects; @@ -56,10 +57,11 @@ abstract class ResumableStreamIterator extends AbstractIterator { private static final RetrySettings DEFAULT_STREAMING_RETRY_SETTINGS = SpannerStubSettings.newBuilder().executeStreamingSqlSettings().getRetrySettings(); + private final ErrorHandler errorHandler; private final RetrySettings streamingRetrySettings; private final Set retryableCodes; private static final Logger logger = Logger.getLogger(ResumableStreamIterator.class.getName()); - private final BackOff backOff; + private BackOff backOff; private final LinkedList buffer = new LinkedList<>(); private final int maxBufferSize; private final ISpan span; @@ -79,15 +81,36 @@ protected 
ResumableStreamIterator( String streamName, ISpan parent, TraceWrapper tracer, + ErrorHandler errorHandler, + RetrySettings streamingRetrySettings, + Set retryableCodes) { + this( + maxBufferSize, + streamName, + parent, + tracer, + Attributes.empty(), + errorHandler, + streamingRetrySettings, + retryableCodes); + } + + protected ResumableStreamIterator( + int maxBufferSize, + String streamName, + ISpan parent, + TraceWrapper tracer, + Attributes attributes, + ErrorHandler errorHandler, RetrySettings streamingRetrySettings, Set retryableCodes) { checkArgument(maxBufferSize >= 0); this.maxBufferSize = maxBufferSize; this.tracer = tracer; - this.span = tracer.spanBuilderWithExplicitParent(streamName, parent); + this.span = tracer.spanBuilderWithExplicitParent(streamName, parent, attributes); + this.errorHandler = errorHandler; this.streamingRetrySettings = Preconditions.checkNotNull(streamingRetrySettings); this.retryableCodes = Preconditions.checkNotNull(retryableCodes); - this.backOff = newBackOff(); } private ExponentialBackOff newBackOff() { @@ -175,6 +198,14 @@ public void execute(Runnable command) { abstract CloseableIterator startStream(@Nullable ByteString resumeToken); + /** + * Prepares the iterator for a retry on a different gRPC channel. Returns true if that is + * possible, and false otherwise. A retry should only be attempted if the method returns true. + */ + boolean prepareIteratorForRetryOnDifferentGrpcChannel() { + return false; + } + @Override public void close(@Nullable String message) { if (stream != null) { @@ -191,6 +222,7 @@ public boolean isWithBeginTransaction() { @Override protected PartialResultSet computeNext() { + int numAttemptsOnOtherChannel = 0; Context context = Context.current(); while (true) { // Eagerly start stream before consuming any buffered items. 
@@ -252,12 +284,26 @@ protected PartialResultSet computeNext() { if (delay != -1) { backoffSleep(context, delay); } else { - backoffSleep(context, backOff); + if (this.backOff == null) { + this.backOff = newBackOff(); + } + backoffSleep(context, this.backOff); } } continue; } + // Check if we should retry the request on a different gRPC channel. + if (resumeToken == null && buffer.isEmpty()) { + Throwable translated = errorHandler.translateException(spannerException); + if (translated instanceof RetryOnDifferentGrpcChannelException) { + if (++numAttemptsOnOtherChannel < errorHandler.getMaxAttempts() + && prepareIteratorForRetryOnDifferentGrpcChannel()) { + stream = null; + continue; + } + } + } span.addAnnotation("Stream broken. Not safe to retry", spannerException); span.setStatus(spannerException); throw spannerException; diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelErrorHandler.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelErrorHandler.java new file mode 100644 index 00000000000..46607d33a88 --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelErrorHandler.java @@ -0,0 +1,83 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SessionImpl.NO_CHANNEL_HINT; + +import com.google.api.core.BetaApi; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Option; +import javax.annotation.Nonnull; + +/** + * An experimental error handler that allows DEADLINE_EXCEEDED errors to be retried on a different + * gRPC channel. This handler is only used if the system property + * 'spanner.retry_deadline_exceeded_on_different_channel' has been set to true, and it is only used + * in the following specific cases: + * + *
    + *
  1. A DEADLINE_EXCEEDED error during a read/write transaction. The error is translated to a + * {@link RetryOnDifferentGrpcChannelException}, which is caught by the session pool and + * causes a retry of the entire transaction on a different session and different gRPC channel. + *
  2. A DEADLINE_EXCEEDED error during a single-use read-only transaction using a multiplexed + * session. Note that errors for the same using a regular session are not retried. + *
+ */ +@BetaApi +class RetryOnDifferentGrpcChannelErrorHandler implements ErrorHandler { + private final int maxAttempts; + + private final SessionImpl session; + + static boolean isEnabled() { + return Boolean.parseBoolean( + System.getProperty("spanner.retry_deadline_exceeded_on_different_channel", "false")); + } + + RetryOnDifferentGrpcChannelErrorHandler(int maxAttempts, SessionImpl session) { + this.maxAttempts = maxAttempts; + this.session = session; + } + + @Override + @Nonnull + public Throwable translateException(@Nonnull Throwable exception) { + if (session == null || !(exception instanceof SpannerException)) { + return exception; + } + SpannerException spannerException = (SpannerException) exception; + if (spannerException.getErrorCode() == ErrorCode.DEADLINE_EXCEEDED) { + if (session.getIsMultiplexed() + || (session.getOptions() != null + && session.getOptions().containsKey(Option.CHANNEL_HINT))) { + int channel = NO_CHANNEL_HINT; + if (session.getOptions() != null && session.getOptions().containsKey(Option.CHANNEL_HINT)) { + channel = Option.CHANNEL_HINT.getLong(session.getOptions()).intValue(); + } + return SpannerExceptionFactory.newRetryOnDifferentGrpcChannelException( + "Retrying on a new gRPC channel due to a DEADLINE_EXCEEDED error", + channel, + spannerException); + } + } + return spannerException; + } + + @Override + public int getMaxAttempts() { + return maxAttempts; + } +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelException.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelException.java new file mode 100644 index 00000000000..59e50ef6a1e --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelException.java @@ -0,0 +1,34 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import javax.annotation.Nullable; + +class RetryOnDifferentGrpcChannelException extends SpannerException { + private final int channel; + + RetryOnDifferentGrpcChannelException( + @Nullable String message, int channel, @Nullable Throwable cause) { + // Note: We set retryable=false, as the exception is not retryable in the standard way. + super(DoNotConstructDirectly.ALLOWED, ErrorCode.INTERNAL, /*retryable=*/ false, message, cause); + this.channel = channel; + } + + int getChannel() { + return this.channel; + } +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionImpl.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionImpl.java index ab985cebf45..7b9abc71a85 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionImpl.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionImpl.java @@ -26,6 +26,7 @@ import com.google.cloud.spanner.AbstractReadContext.MultiUseReadOnlyTransaction; import com.google.cloud.spanner.AbstractReadContext.SingleReadContext; import com.google.cloud.spanner.AbstractReadContext.SingleUseReadOnlyTransaction; +import com.google.cloud.spanner.ErrorHandler.DefaultErrorHandler; import com.google.cloud.spanner.Options.TransactionOption; import com.google.cloud.spanner.Options.UpdateOption; import com.google.cloud.spanner.SessionClient.SessionOption; @@ -99,6 +100,15 @@ interface SessionTransaction { void close(); } + private static final Map[] CHANNEL_HINT_OPTIONS = + new 
Map[SpannerOptions.MAX_CHANNELS]; + + static { + for (int i = 0; i < CHANNEL_HINT_OPTIONS.length; i++) { + CHANNEL_HINT_OPTIONS[i] = optionMap(SessionOption.channelHint(i)); + } + } + static final int NO_CHANNEL_HINT = -1; private final SpannerImpl spanner; @@ -107,6 +117,7 @@ interface SessionTransaction { private ISpan currentSpan; private final Clock clock; private final Map options; + private final ErrorHandler errorHandler; SessionImpl(SpannerImpl spanner, SessionReference sessionReference) { this(spanner, sessionReference, NO_CHANNEL_HINT); @@ -118,6 +129,7 @@ interface SessionTransaction { this.sessionReference = sessionReference; this.clock = spanner.getOptions().getSessionPoolOptions().getPoolMaintainerClock(); this.options = createOptions(sessionReference, channelHint); + this.errorHandler = createErrorHandler(spanner.getOptions()); } static Map createOptions( @@ -125,7 +137,14 @@ interface SessionTransaction { if (channelHint == NO_CHANNEL_HINT) { return sessionReference.getOptions(); } - return optionMap(SessionOption.channelHint(channelHint)); + return CHANNEL_HINT_OPTIONS[channelHint % CHANNEL_HINT_OPTIONS.length]; + } + + private ErrorHandler createErrorHandler(SpannerOptions options) { + if (RetryOnDifferentGrpcChannelErrorHandler.isEnabled()) { + return new RetryOnDifferentGrpcChannelErrorHandler(options.getNumChannels(), this); + } + return DefaultErrorHandler.INSTANCE; } @Override @@ -137,6 +156,10 @@ public String getName() { return options; } + ErrorHandler getErrorHandler() { + return this.errorHandler; + } + void setCurrentSpan(ISpan span) { currentSpan = span; } @@ -403,7 +426,8 @@ public void close() { } } - ApiFuture beginTransactionAsync(Options transactionOptions, boolean routeToLeader) { + ApiFuture beginTransactionAsync( + Options transactionOptions, boolean routeToLeader, Map channelHint) { final SettableApiFuture res = SettableApiFuture.create(); final ISpan span = tracer.spanBuilder(SpannerImpl.BEGIN_TRANSACTION); final 
BeginTransactionRequest request = @@ -411,11 +435,13 @@ ApiFuture beginTransactionAsync(Options transactionOptions, boolean .setSession(getName()) .setOptions(createReadWriteTransactionOptions(transactionOptions)) .build(); - final ApiFuture requestFuture = - spanner.getRpc().beginTransactionAsync(request, getOptions(), routeToLeader); + final ApiFuture requestFuture; + try (IScope ignore = tracer.withSpan(span)) { + requestFuture = spanner.getRpc().beginTransactionAsync(request, channelHint, routeToLeader); + } requestFuture.addListener( () -> { - try (IScope s = tracer.withSpan(span)) { + try (IScope ignore = tracer.withSpan(span)) { Transaction txn = requestFuture.get(); if (txn.getId().isEmpty()) { throw newSpannerException( diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionPool.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionPool.java index f36da57a816..cf50fa44c77 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionPool.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionPool.java @@ -65,9 +65,11 @@ import com.google.cloud.spanner.SpannerImpl.ClosedException; import com.google.cloud.spanner.spi.v1.SpannerRpc; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Function; import com.google.common.base.MoreObjects; import com.google.common.base.Preconditions; +import com.google.common.base.Ticker; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; import com.google.common.collect.ImmutableList; import com.google.common.util.concurrent.ForwardingListenableFuture; import com.google.common.util.concurrent.ForwardingListenableFuture.SimpleForwardingListenableFuture; @@ -107,9 +109,10 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; +import 
java.util.function.Function; import java.util.logging.Level; import java.util.logging.Logger; +import javax.annotation.Nonnull; import javax.annotation.Nullable; import javax.annotation.concurrent.GuardedBy; import org.threeten.bp.Duration; @@ -144,14 +147,6 @@ void maybeWaitOnMinSessions() { ErrorCode.DEADLINE_EXCEEDED, "Timed out after waiting " + timeoutMillis + "ms for session pool creation"); } - - if (useMultiplexedSessions() - && !waitOnMultiplexedSessionsLatch.await(timeoutNanos, TimeUnit.NANOSECONDS)) { - final long timeoutMillis = options.getWaitForMinSessions().toMillis(); - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.DEADLINE_EXCEEDED, - "Timed out after waiting " + timeoutMillis + "ms for multiplexed session creation"); - } } catch (InterruptedException e) { throw SpannerExceptionFactory.propagateInterrupt(e); } @@ -241,7 +236,7 @@ public ApiFuture setCallback(Executor exec, ReadyCallback cb) { private AutoClosingReadContext( Function delegateSupplier, SessionPool sessionPool, - SessionReplacementHandler sessionReplacementHandler, + SessionReplacementHandler sessionReplacementHandler, I session, boolean isSingleUse) { this.readContextDelegateSupplier = delegateSupplier; @@ -554,7 +549,7 @@ private static class AutoClosingReadTransaction AutoClosingReadTransaction( Function txnSupplier, SessionPool sessionPool, - SessionReplacementHandler sessionReplacementHandler, + SessionReplacementHandler sessionReplacementHandler, I session, boolean isSingleUse) { super(txnSupplier, sessionPool, sessionReplacementHandler, session, isSingleUse); @@ -568,6 +563,8 @@ public Timestamp getReadTimestamp() { interface SessionReplacementHandler { T replaceSession(SessionNotFoundException notFound, T sessionFuture); + + T denyListSession(RetryOnDifferentGrpcChannelException retryException, T sessionFuture); } class PooledSessionReplacementHandler implements SessionReplacementHandler { @@ -588,22 +585,36 @@ public PooledSessionFuture replaceSession( throw 
e; } } - } - static class MultiplexedSessionReplacementHandler - implements SessionReplacementHandler { @Override - public MultiplexedSessionFuture replaceSession( - SessionNotFoundException e, MultiplexedSessionFuture session) { - /** - * For multiplexed sessions, we would never obtain a {@link SessionNotFoundException}. Hence, - * this method will ideally never be invoked. - */ - logger.log( - Level.WARNING, - String.format( - "Replace session invoked for multiplexed session => %s", session.getName())); - throw e; + public PooledSessionFuture denyListSession( + RetryOnDifferentGrpcChannelException retryException, PooledSessionFuture session) { + // The feature was not enabled when the session pool was created. + if (denyListedChannels == null) { + throw SpannerExceptionFactory.asSpannerException(retryException.getCause()); + } + + int channel = session.get().getChannel(); + synchronized (lock) { + // Calculate the size manually by iterating over the possible keys. We do this because the + // size of a cache can be stale, and manually checking for each possible key will make sure + // we get the correct value, and it will update the cache. + int currentSize = 0; + for (int i = 0; i < numChannels; i++) { + if (denyListedChannels.getIfPresent(i) != null) { + currentSize++; + } + } + if (currentSize < numChannels - 1) { + denyListedChannels.put(channel, DENY_LISTED); + } else { + // We have now deny-listed all channels. Give up and just throw the original error. + throw SpannerExceptionFactory.asSpannerException(retryException.getCause()); + } + } + session.get().releaseToPosition = Position.LAST; + session.close(); + return getSession(); } } @@ -781,10 +792,13 @@ public ApiFuture bufferAsync(Iterable mutations) { return delegate.bufferAsync(mutations); } + @SuppressWarnings("deprecation") @Override public ResultSetStats analyzeUpdate( Statement statement, QueryAnalyzeMode analyzeMode, UpdateOption... 
options) { - return analyzeUpdateStatement(statement, analyzeMode, options).getStats(); + try (ResultSet resultSet = analyzeUpdateStatement(statement, analyzeMode, options)) { + return resultSet.getStats(); + } } @Override @@ -870,7 +884,7 @@ private static class AutoClosingTransactionManager AutoClosingTransactionManager( T session, - SessionReplacementHandler sessionReplacementHandler, + SessionReplacementHandler sessionReplacementHandler, TransactionOption... options) { this.session = session; this.options = options; @@ -1000,7 +1014,7 @@ private static final class SessionPoolTransactionRunner private SessionPoolTransactionRunner( I session, - SessionReplacementHandler sessionReplacementHandler, + SessionReplacementHandler sessionReplacementHandler, TransactionOption... options) { this.session = session; this.options = options; @@ -1027,11 +1041,20 @@ public T run(TransactionCallable callable) { session = sessionReplacementHandler.replaceSession(e, session); CachedSession cachedSession = session.get(); runner = cachedSession.getDelegate().readWriteTransaction(); + } catch (RetryOnDifferentGrpcChannelException retryException) { + // This error is thrown by the RetryOnDifferentGrpcChannelErrorHandler in the specific + // case that a transaction failed with a DEADLINE_EXCEEDED error. This is an + // experimental feature that is disabled by default, and that can be removed in a + // future version. 
+ session = sessionReplacementHandler.denyListSession(retryException, session); + CachedSession cachedSession = session.get(); + runner = cachedSession.getDelegate().readWriteTransaction(); } } session.get().markUsed(); return result; } catch (SpannerException e) { + //noinspection ThrowableNotThrown session.get().setLastException(e); throw e; } finally { @@ -1064,7 +1087,7 @@ private static class SessionPoolAsyncRunner implements private SessionPoolAsyncRunner( I session, - SessionReplacementHandler sessionReplacementHandler, + SessionReplacementHandler sessionReplacementHandler, TransactionOption... options) { this.session = session; this.options = options; @@ -1100,7 +1123,6 @@ public ApiFuture runAsync(final AsyncWork work, Executor executor) { session = sessionReplacementHandler.replaceSession( (SessionNotFoundException) se, session); - se = null; } catch (SessionNotFoundException e) { exception = e; break; @@ -1266,39 +1288,6 @@ public PooledSessionFuture get() { } } - class MultiplexedSessionFutureWrapper implements SessionFutureWrapper { - private ISpan span; - private volatile MultiplexedSessionFuture multiplexedSessionFuture; - - public MultiplexedSessionFutureWrapper(ISpan span) { - this.span = span; - } - - @Override - public MultiplexedSessionFuture get() { - if (resourceNotFoundException != null) { - span.addAnnotation("Database has been deleted"); - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.NOT_FOUND, - String.format( - "The session pool has been invalidated because a previous RPC returned 'Database not found': %s", - resourceNotFoundException.getMessage()), - resourceNotFoundException); - } - if (multiplexedSessionFuture == null) { - synchronized (lock) { - if (multiplexedSessionFuture == null) { - // Creating a new reference where the request's span state can be stored. 
- MultiplexedSessionFuture multiplexedSessionFuture = new MultiplexedSessionFuture(span); - this.multiplexedSessionFuture = multiplexedSessionFuture; - return multiplexedSessionFuture; - } - } - } - return multiplexedSessionFuture; - } - } - interface SessionFuture extends Session { /** @@ -1318,8 +1307,8 @@ class PooledSessionFuture extends SimpleForwardingListenableFuture(this, pooledSessionReplacementHandler, options); } @Override @@ -1563,7 +1552,7 @@ PooledSession get(final boolean eligibleForLongRunning) { res.markBusy(span); span.addAnnotation("Using Session", "sessionId", res.getName()); synchronized (lock) { - incrementNumSessionsInUse(false); + incrementNumSessionsInUse(); checkedOutSessions.add(this); } res.eligibleForLongRunning = eligibleForLongRunning; @@ -1581,247 +1570,6 @@ PooledSession get(final boolean eligibleForLongRunning) { } } - class MultiplexedSessionFuture implements SessionFuture { - - private final ISpan span; - private volatile MultiplexedSession multiplexedSession; - - MultiplexedSessionFuture(ISpan span) { - this.span = span; - } - - @Override - public Timestamp write(Iterable mutations) throws SpannerException { - return writeWithOptions(mutations).getCommitTimestamp(); - } - - @Override - public CommitResponse writeWithOptions( - Iterable mutations, TransactionOption... options) throws SpannerException { - try { - return get().writeWithOptions(mutations, options); - } finally { - close(); - } - } - - @Override - public Timestamp writeAtLeastOnce(Iterable mutations) throws SpannerException { - return writeAtLeastOnceWithOptions(mutations).getCommitTimestamp(); - } - - @Override - public CommitResponse writeAtLeastOnceWithOptions( - Iterable mutations, TransactionOption... options) throws SpannerException { - try { - return get().writeAtLeastOnceWithOptions(mutations, options); - } finally { - close(); - } - } - - @Override - public ServerStream batchWriteAtLeastOnce( - Iterable mutationGroups, TransactionOption... 
options) - throws SpannerException { - try { - return get().batchWriteAtLeastOnce(mutationGroups, options); - } finally { - close(); - } - } - - @Override - public ReadContext singleUse() { - try { - return new AutoClosingReadContext<>( - session -> { - MultiplexedSession multiplexedSession = session.get(); - return multiplexedSession.getDelegate().singleUse(); - }, - SessionPool.this, - multiplexedSessionReplacementHandler, - this, - true); - } catch (Exception e) { - close(); - throw e; - } - } - - @Override - public ReadContext singleUse(final TimestampBound bound) { - try { - return new AutoClosingReadContext<>( - session -> { - MultiplexedSession multiplexedSession = session.get(); - return multiplexedSession.getDelegate().singleUse(bound); - }, - SessionPool.this, - multiplexedSessionReplacementHandler, - this, - true); - } catch (Exception e) { - close(); - throw e; - } - } - - @Override - public ReadOnlyTransaction singleUseReadOnlyTransaction() { - return internalReadOnlyTransaction( - session -> { - MultiplexedSession multiplexedSession = session.get(); - return multiplexedSession.getDelegate().singleUseReadOnlyTransaction(); - }, - true); - } - - @Override - public ReadOnlyTransaction singleUseReadOnlyTransaction(final TimestampBound bound) { - return internalReadOnlyTransaction( - session -> { - MultiplexedSession multiplexedSession = session.get(); - return multiplexedSession.getDelegate().singleUseReadOnlyTransaction(bound); - }, - true); - } - - @Override - public ReadOnlyTransaction readOnlyTransaction() { - return internalReadOnlyTransaction( - session -> { - MultiplexedSession multiplexedSession = session.get(); - return multiplexedSession.getDelegate().readOnlyTransaction(); - }, - false); - } - - @Override - public ReadOnlyTransaction readOnlyTransaction(final TimestampBound bound) { - return internalReadOnlyTransaction( - session -> { - MultiplexedSession multiplexedSession = session.get(); - return 
multiplexedSession.getDelegate().readOnlyTransaction(bound); - }, - false); - } - - private ReadOnlyTransaction internalReadOnlyTransaction( - Function transactionSupplier, - boolean isSingleUse) { - try { - return new AutoClosingReadTransaction<>( - transactionSupplier, - SessionPool.this, - multiplexedSessionReplacementHandler, - this, - isSingleUse); - } catch (Exception e) { - close(); - throw e; - } - } - - @Override - public TransactionRunner readWriteTransaction(TransactionOption... options) { - return new SessionPoolTransactionRunner<>( - this, multiplexedSessionReplacementHandler, options); - } - - @Override - public TransactionManager transactionManager(TransactionOption... options) { - return new AutoClosingTransactionManager<>( - this, multiplexedSessionReplacementHandler, options); - } - - @Override - public AsyncRunner runAsync(TransactionOption... options) { - return new SessionPoolAsyncRunner(this, multiplexedSessionReplacementHandler, options); - } - - @Override - public AsyncTransactionManager transactionManagerAsync(TransactionOption... options) { - return new SessionPoolAsyncTransactionManager<>( - multiplexedSessionReplacementHandler, this, options); - } - - @Override - public long executePartitionedUpdate(Statement stmt, UpdateOption... 
options) { - try { - return get().executePartitionedUpdate(stmt, options); - } finally { - close(); - } - } - - @Override - public String getName() { - return get().getName(); - } - - @Override - public void close() { - try { - asyncClose().get(); - } catch (InterruptedException e) { - throw SpannerExceptionFactory.propagateInterrupt(e); - } catch (ExecutionException e) { - throw asSpannerException(e.getCause()); - } - } - - @Override - public ApiFuture asyncClose() { - MultiplexedSession delegate = getOrNull(); - if (delegate != null) { - return delegate.asyncClose(); - } - return ApiFutures.immediateFuture(Empty.getDefaultInstance()); - } - - private MultiplexedSession getOrNull() { - try { - return get(); - } catch (Throwable ignore) { - // this exception will never be thrown for a multiplexed session since the Future - // object is already initialised. - return null; - } - } - - @Override - public MultiplexedSession get() { - try { - if (multiplexedSession == null) { - boolean created = false; - synchronized (this) { - if (multiplexedSession == null) { - SessionImpl sessionImpl = - new SessionImpl( - sessionClient.getSpanner(), currentMultiplexedSessionReference.get().get()); - MultiplexedSession multiplexedSession = new MultiplexedSession(sessionImpl); - multiplexedSession.markBusy(span); - span.addAnnotation("Using Session", "sessionId", multiplexedSession.getName()); - this.multiplexedSession = multiplexedSession; - created = true; - } - } - if (created) { - synchronized (lock) { - incrementNumSessionsInUse(true); - } - } - } - return multiplexedSession; - } catch (ExecutionException e) { - throw SpannerExceptionFactory.newSpannerException(e.getCause()); - } catch (InterruptedException e) { - throw SpannerExceptionFactory.propagateInterrupt(e); - } - } - } - interface CachedSession extends Session { SessionImpl getDelegate(); @@ -1832,9 +1580,6 @@ interface CachedSession extends Session { SpannerException setLastException(SpannerException exception); - // 
TODO This method can be removed once we fully migrate to multiplexed sessions. - boolean isAllowReplacing(); - AsyncTransactionManagerImpl transactionManagerAsync(TransactionOption... options); void setAllowReplacing(boolean b); @@ -2024,7 +1769,7 @@ public void close() { if ((lastException != null && isSessionNotFound(lastException)) || isRemovedFromPool) { invalidateSession(this); } else { - if (lastException != null && isDatabaseOrInstanceNotFound(lastException)) { + if (isDatabaseOrInstanceNotFound(lastException)) { // Mark this session pool as no longer valid and then release the session into the pool as // there is nothing we can do with it anyways. synchronized (lock) { @@ -2116,8 +1861,7 @@ public SpannerException setLastException(SpannerException exception) { return exception; } - @Override - public boolean isAllowReplacing() { + boolean isAllowReplacing() { return this.allowReplacing; } @@ -2127,172 +1871,12 @@ public TransactionManager transactionManager(TransactionOption... options) { } } - class MultiplexedSession implements CachedSession { - final SessionImpl delegate; - private volatile SpannerException lastException; - - MultiplexedSession(SessionImpl session) { - this.delegate = session; - } - - @Override - public boolean isAllowReplacing() { - // for multiplexed session there is only 1 session, hence there is nothing that we - // can replace. - return false; - } - - @Override - public void setAllowReplacing(boolean allowReplacing) { - // for multiplexed session there is only 1 session, there is nothing that can be replaced. - // hence this is no-op. 
- } - - @Override - public void markBusy(ISpan span) { - this.delegate.setCurrentSpan(span); - } - - @Override - public void markUsed() { - // no-op for a multiplexed session since we don't track the last-used time - // in case of multiplexed session - } - - @Override - public SpannerException setLastException(SpannerException exception) { - this.lastException = exception; - return exception; - } - - @Override - public SessionImpl getDelegate() { - return delegate; - } - - @Override - public Timestamp write(Iterable mutations) throws SpannerException { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session"); - } - - @Override - public CommitResponse writeWithOptions( - Iterable mutations, TransactionOption... options) throws SpannerException { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session"); - } - - @Override - public Timestamp writeAtLeastOnce(Iterable mutations) throws SpannerException { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session"); - } - - @Override - public CommitResponse writeAtLeastOnceWithOptions( - Iterable mutations, TransactionOption... options) throws SpannerException { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session"); - } - - @Override - public ServerStream batchWriteAtLeastOnce( - Iterable mutationGroups, TransactionOption... 
options) - throws SpannerException { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session"); - } - - @Override - public ReadContext singleUse() { - return delegate.singleUse(); - } - - @Override - public ReadContext singleUse(TimestampBound bound) { - return delegate.singleUse(bound); - } - - @Override - public ReadOnlyTransaction singleUseReadOnlyTransaction() { - return delegate.singleUseReadOnlyTransaction(); - } - - @Override - public ReadOnlyTransaction singleUseReadOnlyTransaction(TimestampBound bound) { - return delegate.singleUseReadOnlyTransaction(bound); - } - - @Override - public ReadOnlyTransaction readOnlyTransaction() { - return delegate.readOnlyTransaction(); - } - - @Override - public ReadOnlyTransaction readOnlyTransaction(TimestampBound bound) { - return delegate.readOnlyTransaction(bound); - } - - @Override - public TransactionRunner readWriteTransaction(TransactionOption... options) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session"); - } - - @Override - public TransactionManager transactionManager(TransactionOption... options) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session"); - } - - @Override - public AsyncRunner runAsync(TransactionOption... options) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session"); - } - - @Override - public AsyncTransactionManagerImpl transactionManagerAsync(TransactionOption... options) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session"); - } - - @Override - public long executePartitionedUpdate(Statement stmt, UpdateOption... 
options) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.UNIMPLEMENTED, "Unimplemented with Multiplexed Session"); - } - - @Override - public String getName() { - return delegate.getName(); - } - - @Override - public void close() { - synchronized (lock) { - if (lastException != null && isDatabaseOrInstanceNotFound(lastException)) { - SessionPool.this.resourceNotFoundException = - MoreObjects.firstNonNull( - SessionPool.this.resourceNotFoundException, - (ResourceNotFoundException) lastException); - } - } - } - - @Override - public ApiFuture asyncClose() { - close(); - return ApiFutures.immediateFuture(Empty.getDefaultInstance()); - } - } - private final class WaiterFuture extends ForwardingListenableFuture { private static final long MAX_SESSION_WAIT_TIMEOUT = 240_000L; private final SettableFuture waiter = SettableFuture.create(); @Override + @Nonnull protected ListenableFuture delegate() { return waiter; } @@ -2310,7 +1894,7 @@ public PooledSession get() { long currentTimeout = options.getInitialWaitForSessionTimeoutMillis(); while (true) { ISpan span = tracer.spanBuilder(WAIT_FOR_SESSION); - try (IScope waitScope = tracer.withSpan(span)) { + try (IScope ignore = tracer.withSpan(span)) { PooledSession s = pollUninterruptiblyWithTimeout(currentTimeout, options.getAcquireSessionTimeout()); if (s == null) { @@ -2395,9 +1979,6 @@ private PooledSession pollUninterruptiblyWithTimeout( */ final class PoolMaintainer { - // Delay post which the maintainer will retry creating/replacing the current multiplexed session - private final Duration multiplexedSessionCreationRetryDelay = Duration.ofMinutes(10); - // Length of the window in millis over which we keep track of maximum number of concurrent // sessions in use. 
private final Duration windowLength = Duration.ofMillis(TimeUnit.MINUTES.toMillis(10)); @@ -2421,8 +2002,6 @@ final class PoolMaintainer { */ @VisibleForTesting Instant lastExecutionTime; - @VisibleForTesting Instant multiplexedSessionReplacementAttemptTime; - /** * The previous numSessionsAcquired seen by the maintainer. This is used to calculate the * transactions per second, which again is used to determine whether to randomize the order of @@ -2440,7 +2019,6 @@ final class PoolMaintainer { void init() { lastExecutionTime = clock.instant(); - multiplexedSessionReplacementAttemptTime = clock.instant(); // Scheduled pool maintenance worker. synchronized (lock) { @@ -2483,7 +2061,6 @@ void maintainPool() { this.prevNumSessionsAcquired = SessionPool.this.numSessionsAcquired; } Instant currTime = clock.instant(); - maintainMultiplexedSession(currTime); removeIdleSessions(currTime); // Now go over all the remaining sessions and see if they need to be kept alive explicitly. keepAliveSessions(currTime); @@ -2506,7 +2083,8 @@ private void removeIdleSessions(Instant currTime) { Iterator iterator = sessions.descendingIterator(); while (iterator.hasNext()) { PooledSession session = iterator.next(); - if (session.delegate.getLastUseTime().isBefore(minLastUseTime)) { + if (session.delegate.getLastUseTime() != null + && session.delegate.getLastUseTime().isBefore(minLastUseTime)) { if (session.state != SessionState.CLOSING) { boolean isRemoved = removeFromPool(session); if (isRemoved) { @@ -2652,44 +2230,6 @@ private void removeLongRunningSessions( } } } - - void maintainMultiplexedSession(Instant currentTime) { - try { - if (useMultiplexedSessions()) { - if (currentMultiplexedSessionReference.get().isDone()) { - SessionReference sessionReference = getMultiplexedSessionInstance(); - if (sessionReference != null - && isMultiplexedSessionStale(sessionReference, currentTime)) { - final Instant minExecutionTime = - multiplexedSessionReplacementAttemptTime.plus( - 
multiplexedSessionCreationRetryDelay); - if (currentTime.isBefore(minExecutionTime)) { - return; - } - /* - This will attempt to create a new multiplexed session. if successfully created then - the existing session will be replaced. Note that there maybe active transactions - running on the stale session. Hence, it is important that we only replace the reference - and not invoke a DeleteSession RPC. - */ - maybeCreateMultiplexedSession(multiplexedMaintainerConsumer); - - // update this only after we have attempted to replace the multiplexed session - multiplexedSessionReplacementAttemptTime = currentTime; - } - } - } - } catch (final Throwable t) { - logger.log(Level.WARNING, "Failed to maintain multiplexed session", t); - } - } - - boolean isMultiplexedSessionStale(SessionReference sessionReference, Instant currentTime) { - final Duration durationFromCreationTime = - Duration.between(sessionReference.getCreateTime(), currentTime); - return durationFromCreationTime.compareTo(options.getMultiplexedSessionMaintenanceDuration()) - > 0; - } } enum Position { @@ -2754,9 +2294,6 @@ enum Position { @GuardedBy("lock") private ResourceNotFoundException resourceNotFoundException; - @GuardedBy("lock") - private boolean stopAutomaticPrepare; - @GuardedBy("lock") private final LinkedList sessions = new LinkedList<>(); @@ -2766,9 +2303,6 @@ enum Position { @GuardedBy("lock") private int numSessionsBeingCreated = 0; - @GuardedBy("lock") - private boolean multiplexedSessionBeingCreated = false; - @GuardedBy("lock") private int numSessionsInUse = 0; @@ -2790,10 +2324,7 @@ enum Position { @GuardedBy("lock") private long numLeakedSessionsRemoved = 0; - private AtomicLong numWaiterTimeouts = new AtomicLong(); - - private final AtomicReference> - currentMultiplexedSessionReference = new AtomicReference<>(SettableApiFuture.create()); + private final AtomicLong numWaiterTimeouts = new AtomicLong(); @GuardedBy("lock") private final Set allSessions = new HashSet<>(); @@ -2807,21 +2338,15 
@@ enum Position { private final SessionConsumer sessionConsumer = new SessionConsumerImpl(); - private final MultiplexedSessionInitializationConsumer multiplexedSessionInitializationConsumer = - new MultiplexedSessionInitializationConsumer(); - private final MultiplexedSessionMaintainerConsumer multiplexedMaintainerConsumer = - new MultiplexedSessionMaintainerConsumer(); - @VisibleForTesting Function idleSessionRemovedListener; @VisibleForTesting Function longRunningSessionRemovedListener; - @VisibleForTesting Function multiplexedSessionRemovedListener; private final CountDownLatch waitOnMinSessionsLatch; - private final CountDownLatch waitOnMultiplexedSessionsLatch; - private final SessionReplacementHandler pooledSessionReplacementHandler = + private final PooledSessionReplacementHandler pooledSessionReplacementHandler = new PooledSessionReplacementHandler(); - private static final SessionReplacementHandler multiplexedSessionReplacementHandler = - new MultiplexedSessionReplacementHandler(); + + private static final Object DENY_LISTED = new Object(); + private final Cache denyListedChannels; /** * Create a session pool with the given options and for the given database. It will also start @@ -2965,13 +2490,22 @@ private SessionPool( openTelemetry, attributes, numMultiplexedSessionsAcquired, numMultiplexedSessionsReleased); this.waitOnMinSessionsLatch = options.getMinSessions() > 0 ? new CountDownLatch(1) : new CountDownLatch(0); - this.waitOnMultiplexedSessionsLatch = new CountDownLatch(1); - } - - // TODO: Remove once all code for multiplexed sessions has been removed from the pool. - private boolean useMultiplexedSessions() { - // Multiplexed sessions have moved to MultiplexedSessionDatabaseClient - return false; + this.denyListedChannels = + RetryOnDifferentGrpcChannelErrorHandler.isEnabled() + ? 
CacheBuilder.newBuilder() + .expireAfterWrite(java.time.Duration.ofMinutes(1)) + .maximumSize(this.numChannels) + .concurrencyLevel(1) + .ticker( + new Ticker() { + @Override + public long read() { + return TimeUnit.NANOSECONDS.convert( + clock.instant().toEpochMilli(), TimeUnit.MILLISECONDS); + } + }) + .build() + : null; } /** @@ -3007,7 +2541,7 @@ Dialect getDialect() { } } - SessionReplacementHandler getPooledSessionReplacementHandler() { + PooledSessionReplacementHandler getPooledSessionReplacementHandler() { return pooledSessionReplacementHandler; } @@ -3087,13 +2621,6 @@ int getTotalSessionsPlusNumSessionsBeingCreated() { } } - @VisibleForTesting - boolean isMultiplexedSessionBeingCreated() { - synchronized (lock) { - return multiplexedSessionBeingCreated; - } - } - @VisibleForTesting long getNumWaiterTimeouts() { return numWaiterTimeouts.get(); @@ -3105,9 +2632,6 @@ private void initPool() { if (options.getMinSessions() > 0) { createSessions(options.getMinSessions(), true); } - if (useMultiplexedSessions()) { - maybeCreateMultiplexedSession(multiplexedSessionInitializationConsumer); - } } } @@ -3153,7 +2677,8 @@ private Tuple findSessionToKeepAlive( && (numChecked + numAlreadyChecked) < (options.getMinSessions() + options.getMaxIdleSessions() - numSessionsInUse)) { PooledSession session = iterator.next(); - if (session.delegate.getLastUseTime().isBefore(keepAliveThreshold)) { + if (session.delegate.getLastUseTime() != null + && session.delegate.getLastUseTime().isBefore(keepAliveThreshold)) { iterator.remove(); return Tuple.of(session, numChecked); } @@ -3173,36 +2698,8 @@ boolean isValid() { * Returns a multiplexed session. The method fallbacks to a regular session if {@link * SessionPoolOptions#getUseMultiplexedSession} is not set. 
*/ - SessionFutureWrapper getMultiplexedSessionWithFallback() throws SpannerException { - if (useMultiplexedSessions()) { - ISpan span = tracer.getCurrentSpan(); - try { - return getWrappedMultiplexedSessionFuture(span); - } catch (Throwable t) { - span.addAnnotation("No multiplexed session available."); - throw asSpannerException(t.getCause()); - } - } else { - return new PooledSessionFutureWrapper(getSession()); - } - } - - SessionFutureWrapper getWrappedMultiplexedSessionFuture(ISpan span) { - return new MultiplexedSessionFutureWrapper(span); - } - - /** - * This method is a blocking method. It will block until the underlying {@code - * SettableApiFuture} is resolved. - */ - SessionReference getMultiplexedSessionInstance() { - try { - return currentMultiplexedSessionReference.get().get(); - } catch (InterruptedException e) { - throw SpannerExceptionFactory.propagateInterrupt(e); - } catch (ExecutionException e) { - throw asSpannerException(e.getCause()); - } + PooledSessionFutureWrapper getMultiplexedSessionWithFallback() throws SpannerException { + return new PooledSessionFutureWrapper(getSession()); } /** @@ -3239,7 +2736,26 @@ PooledSessionFuture getSession() throws SpannerException { resourceNotFoundException.getMessage()), resourceNotFoundException); } - sess = sessions.poll(); + if (denyListedChannels != null + && denyListedChannels.size() > 0 + && denyListedChannels.size() < numChannels) { + // There are deny-listed channels. Get a session that is not affiliated with a deny-listed + // channel. + for (PooledSession session : sessions) { + if (denyListedChannels.getIfPresent(session.getChannel()) == null) { + sessions.remove(session); + sess = session; + break; + } + // Size is cached and can change after calling getIfPresent. 
+ if (denyListedChannels.size() == 0) { + break; + } + } + } + if (sess == null) { + sess = sessions.poll(); + } if (sess == null) { span.addAnnotation("No session available"); maybeCreateSession(); @@ -3271,14 +2787,12 @@ private PooledSessionFuture checkoutSession( return res; } - private void incrementNumSessionsInUse(boolean isMultiplexed) { + private void incrementNumSessionsInUse() { synchronized (lock) { - if (!isMultiplexed) { - if (maxSessionsInUse < ++numSessionsInUse) { - maxSessionsInUse = numSessionsInUse; - } - numSessionsAcquired++; + if (maxSessionsInUse < ++numSessionsInUse) { + maxSessionsInUse = numSessionsInUse; } + numSessionsAcquired++; } } @@ -3496,7 +3010,7 @@ static boolean isUnbalanced( private void handleCreateSessionsFailure(SpannerException e, int count) { synchronized (lock) { for (int i = 0; i < count; i++) { - if (waiters.size() > 0) { + if (!waiters.isEmpty()) { waiters.poll().put(e); } else { break; @@ -3638,20 +3152,6 @@ private boolean canCreateSession() { } } - private void maybeCreateMultiplexedSession(SessionConsumer sessionConsumer) { - synchronized (lock) { - if (!multiplexedSessionBeingCreated) { - logger.log(Level.FINE, String.format("Creating multiplexed sessions")); - try { - multiplexedSessionBeingCreated = true; - sessionClient.asyncCreateMultiplexedSession(sessionConsumer); - } catch (Throwable ignore) { - // such an exception will never be thrown. the exception will be passed onto the consumer. - } - } - } - } - private void createSessions(final int sessionCount, boolean distributeOverChannels) { logger.log(Level.FINE, String.format("Creating %d sessions", sessionCount)); synchronized (lock) { @@ -3674,99 +3174,6 @@ private void createSessions(final int sessionCount, boolean distributeOverChanne } } - /** - * Callback interface which is invoked when a multiplexed session is being replaced by the - * background maintenance thread. 
When a multiplexed session creation fails during background - * thread, it would simply log the exception and retry the session creation in the next background - * thread invocation. - * - *

This consumer is not used when the multiplexed session is getting initialized for the first - * time during application startup. We instead use {@link - * MultiplexedSessionInitializationConsumer} for the first time when multiplexed session is - * getting created. - */ - class MultiplexedSessionMaintainerConsumer implements SessionConsumer { - @Override - public void onSessionReady(SessionImpl sessionImpl) { - final SessionReference sessionReference = sessionImpl.getSessionReference(); - final SettableFuture settableFuture = SettableFuture.create(); - settableFuture.set(sessionReference); - - synchronized (lock) { - SessionReference oldSession = null; - if (currentMultiplexedSessionReference.get().isDone()) { - oldSession = getMultiplexedSessionInstance(); - } - SettableApiFuture settableApiFuture = SettableApiFuture.create(); - settableApiFuture.set(sessionReference); - currentMultiplexedSessionReference.set(settableApiFuture); - if (oldSession != null) { - logger.log( - Level.INFO, - String.format( - "Removed Multiplexed Session => %s created at => %s", - oldSession.getName(), oldSession.getCreateTime())); - if (multiplexedSessionRemovedListener != null) { - multiplexedSessionRemovedListener.apply(oldSession); - } - } - multiplexedSessionBeingCreated = false; - } - } - - /** - * Method which logs the exception so that session creation can be re-attempted in the next - * background thread invocation. - */ - @Override - public void onSessionCreateFailure(Throwable t, int createFailureForSessionCount) { - synchronized (lock) { - multiplexedSessionBeingCreated = false; - } - logger.log( - Level.WARNING, - String.format( - "Failed to create multiplexed session. " - + "Pending replacing stale multiplexed session", - t)); - } - } - - /** - * Callback interface which is invoked when a multiplexed session is getting initialised for the - * first time when a session is getting created. 
- */ - class MultiplexedSessionInitializationConsumer implements SessionConsumer { - @Override - public void onSessionReady(SessionImpl sessionImpl) { - final SessionReference sessionReference = sessionImpl.getSessionReference(); - synchronized (lock) { - SettableApiFuture settableApiFuture = - currentMultiplexedSessionReference.get(); - settableApiFuture.set(sessionReference); - multiplexedSessionBeingCreated = false; - waitOnMultiplexedSessionsLatch.countDown(); - } - } - - /** - * When a multiplexed session fails during initialization we would like all pending threads to - * receive the exception and throw the error. This is done because at the time of start up there - * is no other multiplexed session which could have been assigned to the pending requests. - */ - @Override - public void onSessionCreateFailure(Throwable t, int createFailureForSessionCount) { - synchronized (lock) { - multiplexedSessionBeingCreated = false; - if (isDatabaseOrInstanceNotFound(asSpannerException(t))) { - setResourceNotFoundException((ResourceNotFoundException) t); - poolMaintainer.close(); - } - currentMultiplexedSessionReference.get().setException(asSpannerException(t)); - } - } - } - /** * {@link SessionConsumer} that receives the created sessions from a {@link SessionClient} and * releases these into the pool. 
The session pool only needs one instance of this, as all sessions diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionPoolOptions.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionPoolOptions.java index 382bef1b5a2..ba335cf8f9f 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionPoolOptions.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionPoolOptions.java @@ -16,7 +16,6 @@ package com.google.cloud.spanner; -import com.google.api.core.BetaApi; import com.google.api.core.InternalApi; import com.google.cloud.spanner.SessionPool.Position; import com.google.common.annotations.VisibleForTesting; @@ -74,7 +73,13 @@ public class SessionPoolOptions { private final boolean useMultiplexedSession; - private final boolean useRandomChannelHint; + /** + * Controls whether multiplexed session is enabled for blind write or not. This is only used for + * systest soak. TODO: Remove when multiplexed session for blind write is released. + */ + private final boolean useMultiplexedSessionBlindWrite; + + private final boolean useMultiplexedSessionForRW; // TODO: Change to use java.time.Duration. private final Duration multiplexedSessionMaintenanceDuration; @@ -105,12 +110,20 @@ private SessionPoolOptions(Builder builder) { this.randomizePositionQPSThreshold = builder.randomizePositionQPSThreshold; this.inactiveTransactionRemovalOptions = builder.inactiveTransactionRemovalOptions; this.poolMaintainerClock = builder.poolMaintainerClock; - // TODO: Remove when multiplexed sessions are guaranteed to be supported. 
+ // useMultiplexedSession priority => Environment var > private setter > client default + Boolean useMultiplexedSessionFromEnvVariable = getUseMultiplexedSessionFromEnvVariable(); this.useMultiplexedSession = - builder.useMultiplexedSession - && !Boolean.parseBoolean( - System.getenv("GOOGLE_CLOUD_SPANNER_FORCE_DISABLE_MULTIPLEXED_SESSIONS")); - this.useRandomChannelHint = builder.useRandomChannelHint; + (useMultiplexedSessionFromEnvVariable != null) + ? useMultiplexedSessionFromEnvVariable + : builder.useMultiplexedSession; + this.useMultiplexedSessionBlindWrite = builder.useMultiplexedSessionBlindWrite; + // useMultiplexedSessionForRW priority => Environment var > private setter > client default + Boolean useMultiplexedSessionForRWFromEnvVariable = + getUseMultiplexedSessionForRWFromEnvVariable(); + this.useMultiplexedSessionForRW = + (useMultiplexedSessionForRWFromEnvVariable != null) + ? useMultiplexedSessionForRWFromEnvVariable + : builder.useMultiplexedSessionForRW; this.multiplexedSessionMaintenanceDuration = builder.multiplexedSessionMaintenanceDuration; } @@ -147,7 +160,7 @@ public boolean equals(Object o) { this.inactiveTransactionRemovalOptions, other.inactiveTransactionRemovalOptions) && Objects.equals(this.poolMaintainerClock, other.poolMaintainerClock) && Objects.equals(this.useMultiplexedSession, other.useMultiplexedSession) - && Objects.equals(this.useRandomChannelHint, other.useRandomChannelHint) + && Objects.equals(this.useMultiplexedSessionForRW, other.useMultiplexedSessionForRW) && Objects.equals( this.multiplexedSessionMaintenanceDuration, other.multiplexedSessionMaintenanceDuration); @@ -178,7 +191,8 @@ public int hashCode() { this.inactiveTransactionRemovalOptions, this.poolMaintainerClock, this.useMultiplexedSession, - this.useRandomChannelHint, + this.useMultiplexedSessionBlindWrite, + this.useMultiplexedSessionForRW, this.multiplexedSessionMaintenanceDuration); } @@ -312,8 +326,40 @@ public boolean getUseMultiplexedSession() { return 
useMultiplexedSession; } - boolean isUseRandomChannelHint() { - return useRandomChannelHint; + @VisibleForTesting + @InternalApi + protected boolean getUseMultiplexedSessionBlindWrite() { + return getUseMultiplexedSession() && useMultiplexedSessionBlindWrite; + } + + @VisibleForTesting + @InternalApi + public boolean getUseMultiplexedSessionForRW() { + // Multiplexed sessions for R/W are enabled only if both global multiplexed sessions and + // read-write multiplexed session flags are set to true. + return getUseMultiplexedSession() && useMultiplexedSessionForRW; + } + + private static Boolean getUseMultiplexedSessionFromEnvVariable() { + String useMultiplexedSessionFromEnvVariable = + System.getenv("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS"); + if (useMultiplexedSessionFromEnvVariable != null + && useMultiplexedSessionFromEnvVariable.length() > 0) { + if ("true".equalsIgnoreCase(useMultiplexedSessionFromEnvVariable) + || "false".equalsIgnoreCase(useMultiplexedSessionFromEnvVariable)) { + return Boolean.parseBoolean(useMultiplexedSessionFromEnvVariable); + } else { + throw new IllegalArgumentException( + "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS should be either true or false."); + } + } + return null; + } + + private static Boolean getUseMultiplexedSessionForRWFromEnvVariable() { + // Checks the value of env, GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW + // This returns null until RW is supported. + return null; } Duration getMultiplexedSessionMaintenanceDuration() { @@ -350,24 +396,24 @@ enum ActionOnInactiveTransaction { static class InactiveTransactionRemovalOptions { /** Option to set the behaviour when there are inactive transactions. */ - private ActionOnInactiveTransaction actionOnInactiveTransaction; + private final ActionOnInactiveTransaction actionOnInactiveTransaction; /** * Frequency for closing inactive transactions. Between two consecutive task executions, it's * ensured that the duration is greater or equal to this duration. 
*/ - private Duration executionFrequency; + private final Duration executionFrequency; /** * Long-running transactions will be cleaned up if utilisation is greater than the below value. */ - private double usedSessionsRatioThreshold; + private final double usedSessionsRatioThreshold; /** * A transaction is considered to be idle if it has not been used for a duration greater than * the below value. */ - private Duration idleTimeThreshold; + private final Duration idleTimeThreshold; InactiveTransactionRemovalOptions(final Builder builder) { this.actionOnInactiveTransaction = builder.actionOnInactiveTransaction; @@ -509,7 +555,7 @@ public static class Builder { private boolean autoDetectDialect = false; private Duration waitForMinSessions = Duration.ZERO; private Duration acquireSessionTimeout = Duration.ofSeconds(60); - private Position releaseToPosition = getReleaseToPositionFromSystemProperty(); + private final Position releaseToPosition = getReleaseToPositionFromSystemProperty(); /** * The session pool will randomize the position of a session that is being returned when this * threshold is exceeded. That is: If the transactions per second exceeds this threshold, then @@ -518,9 +564,18 @@ public static class Builder { */ private long randomizePositionQPSThreshold = 0L; - private boolean useMultiplexedSession = getUseMultiplexedSessionFromEnvVariable(); + // This field controls the default behavior of session management in Java client. + // Set useMultiplexedSession to true to make multiplexed session the default. + private boolean useMultiplexedSession = false; - private boolean useRandomChannelHint; + // TODO: Remove when multiplexed session for blind write is released. + private boolean useMultiplexedSessionBlindWrite = false; + + // This field controls the default behavior of session management for RW operations in Java + // client. + // Set useMultiplexedSessionForRW to true to make multiplexed session for RW operations the + // default. 
+ private boolean useMultiplexedSessionForRW = false; private Duration multiplexedSessionMaintenanceDuration = Duration.ofDays(7); private Clock poolMaintainerClock = Clock.INSTANCE; @@ -538,18 +593,6 @@ private static Position getReleaseToPositionFromSystemProperty() { return Position.FIRST; } - /** - * This environment is only added to support internal spanner testing. Support for it can be - * removed in the future. Use {@link SessionPoolOptions#useMultiplexedSession} instead to use - * multiplexed sessions. - */ - @InternalApi - @BetaApi - private static boolean getUseMultiplexedSessionFromEnvVariable() { - return Boolean.parseBoolean( - System.getenv("GOOGLE_CLOUD_SPANNER_ENABLE_MULTIPLEXED_SESSIONS")); - } - public Builder() {} private Builder(SessionPoolOptions options) { @@ -574,6 +617,10 @@ private Builder(SessionPoolOptions options) { this.acquireSessionTimeout = options.acquireSessionTimeout; this.randomizePositionQPSThreshold = options.randomizePositionQPSThreshold; this.inactiveTransactionRemovalOptions = options.inactiveTransactionRemovalOptions; + this.useMultiplexedSession = options.useMultiplexedSession; + this.useMultiplexedSessionBlindWrite = options.useMultiplexedSessionBlindWrite; + this.useMultiplexedSessionForRW = options.useMultiplexedSessionForRW; + this.multiplexedSessionMaintenanceDuration = options.multiplexedSessionMaintenanceDuration; this.poolMaintainerClock = options.poolMaintainerClock; } @@ -760,8 +807,23 @@ Builder setUseMultiplexedSession(boolean useMultiplexedSession) { return this; } - Builder setUseRandomChannelHint(boolean useRandomChannelHint) { - this.useRandomChannelHint = useRandomChannelHint; + /** + * This method enables multiplexed sessions for blind writes. This method will be removed in the + * future when multiplexed sessions has been made the default for all operations. 
+ */ + @InternalApi + @VisibleForTesting + Builder setUseMultiplexedSessionBlindWrite(boolean useMultiplexedSessionBlindWrite) { + this.useMultiplexedSessionBlindWrite = useMultiplexedSessionBlindWrite; + return this; + } + + /** + * Sets whether the client should use multiplexed session for R/W operations or not. This method + * is intentionally package-private and intended for internal use. + */ + Builder setUseMultiplexedSessionForRW(boolean useMultiplexedSessionForRW) { + this.useMultiplexedSessionForRW = useMultiplexedSessionForRW; return this; } @@ -878,11 +940,6 @@ public Builder setAcquireSessionTimeout(Duration acquireSessionTimeout) { return this; } - Builder setReleaseToPosition(Position releaseToPosition) { - this.releaseToPosition = Preconditions.checkNotNull(releaseToPosition); - return this; - } - Builder setRandomizePositionQPSThreshold(long randomizePositionQPSThreshold) { Preconditions.checkArgument( randomizePositionQPSThreshold >= 0L, "randomizePositionQPSThreshold must be >= 0"); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerCloudMonitoringExporter.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerCloudMonitoringExporter.java new file mode 100644 index 00000000000..51dc890902c --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerCloudMonitoringExporter.java @@ -0,0 +1,236 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.BuiltInMetricsConstant.SPANNER_METRICS; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.rpc.PermissionDeniedException; +import com.google.auth.Credentials; +import com.google.cloud.monitoring.v3.MetricServiceClient; +import com.google.cloud.monitoring.v3.MetricServiceSettings; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.collect.Iterables; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.monitoring.v3.CreateTimeSeriesRequest; +import com.google.monitoring.v3.ProjectName; +import com.google.monitoring.v3.TimeSeries; +import com.google.protobuf.Empty; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import org.threeten.bp.Duration; + +/** + * Spanner Cloud Monitoring OpenTelemetry Exporter. + * + *

The exporter will look for all spanner owned metrics under spanner.googleapis.com + * instrumentation scope and upload it via the Google Cloud Monitoring API. + */ +class SpannerCloudMonitoringExporter implements MetricExporter { + + private static final Logger logger = + Logger.getLogger(SpannerCloudMonitoringExporter.class.getName()); + + // This system property can be used to override the monitoring endpoint + // to a different environment. It's meant for internal testing only. + private static final String MONITORING_ENDPOINT = + MoreObjects.firstNonNull( + System.getProperty("spanner.test-monitoring-endpoint"), + MetricServiceSettings.getDefaultEndpoint()); + + // This the quota limit from Cloud Monitoring. More details in + // https://cloud.google.com/monitoring/quotas#custom_metrics_quotas. + private static final int EXPORT_BATCH_SIZE_LIMIT = 200; + private final AtomicBoolean spannerExportFailureLogged = new AtomicBoolean(false); + private CompletableResultCode lastExportCode; + private final MetricServiceClient client; + private final String spannerProjectId; + + static SpannerCloudMonitoringExporter create(String projectId, @Nullable Credentials credentials) + throws IOException { + MetricServiceSettings.Builder settingsBuilder = MetricServiceSettings.newBuilder(); + CredentialsProvider credentialsProvider; + if (credentials == null) { + credentialsProvider = NoCredentialsProvider.create(); + } else { + credentialsProvider = FixedCredentialsProvider.create(credentials); + } + settingsBuilder.setCredentialsProvider(credentialsProvider); + settingsBuilder.setEndpoint(MONITORING_ENDPOINT); + + org.threeten.bp.Duration timeout = Duration.ofMinutes(1); + // TODO: createServiceTimeSeries needs special handling if the request failed. Leaving + // it as not retried for now. 
+ settingsBuilder.createServiceTimeSeriesSettings().setSimpleTimeoutNoRetries(timeout); + + return new SpannerCloudMonitoringExporter( + projectId, MetricServiceClient.create(settingsBuilder.build())); + } + + @VisibleForTesting + SpannerCloudMonitoringExporter(String projectId, MetricServiceClient client) { + this.client = client; + this.spannerProjectId = projectId; + } + + @Override + public CompletableResultCode export(Collection collection) { + if (client.isShutdown()) { + logger.log(Level.WARNING, "Exporter is shut down"); + return CompletableResultCode.ofFailure(); + } + + this.lastExportCode = exportSpannerClientMetrics(collection); + return lastExportCode; + } + + /** Export client built in metrics */ + private CompletableResultCode exportSpannerClientMetrics(Collection collection) { + // Filter spanner metrics + List spannerMetricData = + collection.stream() + .filter(md -> SPANNER_METRICS.contains(md.getName())) + .collect(Collectors.toList()); + + // Skips exporting if there's none + if (spannerMetricData.isEmpty()) { + return CompletableResultCode.ofSuccess(); + } + + // Verifies metrics project id is the same as the spanner project id set on this client + if (!spannerMetricData.stream() + .flatMap(metricData -> metricData.getData().getPoints().stream()) + .allMatch( + pd -> spannerProjectId.equals(SpannerCloudMonitoringExporterUtils.getProjectId(pd)))) { + logger.log(Level.WARNING, "Metric data has a different projectId. 
Skipping export."); + return CompletableResultCode.ofFailure(); + } + + List spannerTimeSeries; + try { + spannerTimeSeries = + SpannerCloudMonitoringExporterUtils.convertToSpannerTimeSeries(spannerMetricData); + } catch (Throwable e) { + logger.log( + Level.WARNING, + "Failed to convert spanner metric data to cloud monitoring timeseries.", + e); + return CompletableResultCode.ofFailure(); + } + + ProjectName projectName = ProjectName.of(spannerProjectId); + + ApiFuture> futureList = exportTimeSeriesInBatch(projectName, spannerTimeSeries); + + CompletableResultCode spannerExportCode = new CompletableResultCode(); + ApiFutures.addCallback( + futureList, + new ApiFutureCallback>() { + @Override + public void onFailure(Throwable throwable) { + if (spannerExportFailureLogged.compareAndSet(false, true)) { + String msg = "createServiceTimeSeries request failed for spanner metrics."; + if (throwable instanceof PermissionDeniedException) { + // TODO: Add the link of public documentation when available in the log message. + msg += + String.format( + " Need monitoring metric writer permission on project=%s.", + projectName.getProject()); + } + logger.log(Level.WARNING, msg, throwable); + } + spannerExportCode.fail(); + } + + @Override + public void onSuccess(List empty) { + // When an export succeeded reset the export failure flag to false so if there's a + // transient failure it'll be logged. 
+ spannerExportFailureLogged.set(false); + spannerExportCode.succeed(); + } + }, + MoreExecutors.directExecutor()); + + return spannerExportCode; + } + + private ApiFuture> exportTimeSeriesInBatch( + ProjectName projectName, List timeSeries) { + List> batchResults = new ArrayList<>(); + + for (List batch : Iterables.partition(timeSeries, EXPORT_BATCH_SIZE_LIMIT)) { + CreateTimeSeriesRequest req = + CreateTimeSeriesRequest.newBuilder() + .setName(projectName.toString()) + .addAllTimeSeries(batch) + .build(); + batchResults.add(this.client.createServiceTimeSeriesCallable().futureCall(req)); + } + + return ApiFutures.allAsList(batchResults); + } + + @Override + public CompletableResultCode flush() { + return CompletableResultCode.ofSuccess(); + } + + @Override + public CompletableResultCode shutdown() { + if (client.isShutdown()) { + logger.log(Level.WARNING, "shutdown is called multiple times"); + return CompletableResultCode.ofSuccess(); + } + CompletableResultCode shutdownResult = new CompletableResultCode(); + try { + client.shutdown(); + shutdownResult.succeed(); + } catch (Throwable e) { + logger.log(Level.WARNING, "failed to shutdown the monitoring client", e); + shutdownResult.fail(); + } + return shutdownResult; + } + + /** + * For Google Cloud Monitoring always return CUMULATIVE to keep track of the cumulative value of a + * metric over time. 
+ */ + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return AggregationTemporality.CUMULATIVE; + } +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerCloudMonitoringExporterUtils.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerCloudMonitoringExporterUtils.java new file mode 100644 index 00000000000..a6d1e29d587 --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerCloudMonitoringExporterUtils.java @@ -0,0 +1,211 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.api.MetricDescriptor.MetricKind.CUMULATIVE; +import static com.google.api.MetricDescriptor.MetricKind.GAUGE; +import static com.google.api.MetricDescriptor.MetricKind.UNRECOGNIZED; +import static com.google.api.MetricDescriptor.ValueType.DISTRIBUTION; +import static com.google.api.MetricDescriptor.ValueType.DOUBLE; +import static com.google.api.MetricDescriptor.ValueType.INT64; +import static com.google.cloud.spanner.BuiltInMetricsConstant.GAX_METER_NAME; +import static com.google.cloud.spanner.BuiltInMetricsConstant.PROJECT_ID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.SPANNER_PROMOTED_RESOURCE_LABELS; +import static com.google.cloud.spanner.BuiltInMetricsConstant.SPANNER_RESOURCE_TYPE; + +import com.google.api.Distribution; +import com.google.api.Distribution.BucketOptions; +import com.google.api.Distribution.BucketOptions.Explicit; +import com.google.api.Metric; +import com.google.api.MetricDescriptor.MetricKind; +import com.google.api.MetricDescriptor.ValueType; +import com.google.api.MonitoredResource; +import com.google.monitoring.v3.Point; +import com.google.monitoring.v3.TimeInterval; +import com.google.monitoring.v3.TimeSeries; +import com.google.monitoring.v3.TypedValue; +import com.google.protobuf.util.Timestamps; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.DoublePointData; +import io.opentelemetry.sdk.metrics.data.HistogramData; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.data.MetricDataType; +import io.opentelemetry.sdk.metrics.data.PointData; +import io.opentelemetry.sdk.metrics.data.SumData; +import java.util.ArrayList; +import 
java.util.List; +import java.util.logging.Level; +import java.util.logging.Logger; + +class SpannerCloudMonitoringExporterUtils { + + private static final Logger logger = + Logger.getLogger(SpannerCloudMonitoringExporterUtils.class.getName()); + + private SpannerCloudMonitoringExporterUtils() {} + + static String getProjectId(PointData pointData) { + return pointData.getAttributes().get(PROJECT_ID_KEY); + } + + static List convertToSpannerTimeSeries(List collection) { + List allTimeSeries = new ArrayList<>(); + + for (MetricData metricData : collection) { + // Get common metrics data from GAX library + if (!metricData.getInstrumentationScopeInfo().getName().equals(GAX_METER_NAME)) { + // Filter out metric data for instruments that are not part of the spanner metrics list + continue; + } + metricData.getData().getPoints().stream() + .map(pointData -> convertPointToSpannerTimeSeries(metricData, pointData)) + .forEach(allTimeSeries::add); + } + + return allTimeSeries; + } + + private static TimeSeries convertPointToSpannerTimeSeries( + MetricData metricData, PointData pointData) { + TimeSeries.Builder builder = + TimeSeries.newBuilder() + .setMetricKind(convertMetricKind(metricData)) + .setValueType(convertValueType(metricData.getType())); + Metric.Builder metricBuilder = Metric.newBuilder().setType(metricData.getName()); + + Attributes attributes = pointData.getAttributes(); + MonitoredResource.Builder monitoredResourceBuilder = + MonitoredResource.newBuilder().setType(SPANNER_RESOURCE_TYPE); + + for (AttributeKey key : attributes.asMap().keySet()) { + if (SPANNER_PROMOTED_RESOURCE_LABELS.contains(key)) { + monitoredResourceBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key))); + } else { + metricBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key))); + } + } + + builder.setResource(monitoredResourceBuilder.build()); + builder.setMetric(metricBuilder.build()); + + TimeInterval timeInterval = + TimeInterval.newBuilder() + 
.setStartTime(Timestamps.fromNanos(pointData.getStartEpochNanos())) + .setEndTime(Timestamps.fromNanos(pointData.getEpochNanos())) + .build(); + + builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval)); + + return builder.build(); + } + + private static MetricKind convertMetricKind(MetricData metricData) { + switch (metricData.getType()) { + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return convertHistogramType(metricData.getHistogramData()); + case LONG_GAUGE: + case DOUBLE_GAUGE: + return GAUGE; + case LONG_SUM: + return convertSumDataType(metricData.getLongSumData()); + case DOUBLE_SUM: + return convertSumDataType(metricData.getDoubleSumData()); + default: + return UNRECOGNIZED; + } + } + + private static MetricKind convertHistogramType(HistogramData histogramData) { + if (histogramData.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) { + return CUMULATIVE; + } + return UNRECOGNIZED; + } + + private static MetricKind convertSumDataType(SumData sum) { + if (!sum.isMonotonic()) { + return GAUGE; + } + if (sum.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) { + return CUMULATIVE; + } + return UNRECOGNIZED; + } + + private static ValueType convertValueType(MetricDataType metricDataType) { + switch (metricDataType) { + case LONG_GAUGE: + case LONG_SUM: + return INT64; + case DOUBLE_GAUGE: + case DOUBLE_SUM: + return DOUBLE; + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return DISTRIBUTION; + default: + return ValueType.UNRECOGNIZED; + } + } + + private static Point createPoint( + MetricDataType type, PointData pointData, TimeInterval timeInterval) { + Point.Builder builder = Point.newBuilder().setInterval(timeInterval); + switch (type) { + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return builder + .setValue( + TypedValue.newBuilder() + .setDistributionValue(convertHistogramData((HistogramPointData) pointData)) + .build()) + .build(); + case DOUBLE_GAUGE: + case DOUBLE_SUM: + return builder + 
.setValue( + TypedValue.newBuilder() + .setDoubleValue(((DoublePointData) pointData).getValue()) + .build()) + .build(); + case LONG_GAUGE: + case LONG_SUM: + return builder + .setValue(TypedValue.newBuilder().setInt64Value(((LongPointData) pointData).getValue())) + .build(); + default: + logger.log(Level.WARNING, "unsupported metric type"); + return builder.build(); + } + } + + private static Distribution convertHistogramData(HistogramPointData pointData) { + return Distribution.newBuilder() + .setCount(pointData.getCount()) + .setMean(pointData.getCount() == 0L ? 0.0D : pointData.getSum() / pointData.getCount()) + .setBucketOptions( + BucketOptions.newBuilder() + .setExplicitBuckets(Explicit.newBuilder().addAllBounds(pointData.getBoundaries()))) + .addAllBucketCounts(pointData.getCounts()) + .build(); + } +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerExceptionFactory.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerExceptionFactory.java index 2c52192d214..39b254fe997 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerExceptionFactory.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerExceptionFactory.java @@ -181,6 +181,17 @@ public static SpannerException newSpannerException(@Nullable Context context, Th return newSpannerException(ErrorCode.fromGrpcStatus(status), cause.getMessage(), cause); } + /** + * Creates a new SpannerException that indicates that the RPC or transaction should be retried on + * a different gRPC channel. This is an experimental feature that can be removed in the future. + * The exception should not be surfaced to the client application, and should instead be caught + * and handled in the client library. 
+ */ + static SpannerException newRetryOnDifferentGrpcChannelException( + String message, int channel, Throwable cause) { + return new RetryOnDifferentGrpcChannelException(message, channel, cause); + } + static SpannerException newSpannerExceptionForCancellation( @Nullable Context context, @Nullable Throwable cause) { if (context != null && context.isCancelled()) { @@ -322,7 +333,9 @@ private static boolean isRetryable(ErrorCode code, @Nullable Throwable cause) { case UNAVAILABLE: // SSLHandshakeException is (probably) not retryable, as it is an indication that the server // certificate was not accepted by the client. - return !hasCauseMatching(cause, Matchers.isSSLHandshakeException); + // Channel shutdown is also not a retryable exception. + return !(hasCauseMatching(cause, Matchers.isSSLHandshakeException) + || hasCauseMatching(cause, Matchers.IS_CHANNEL_SHUTDOWN_EXCEPTION)); case RESOURCE_EXHAUSTED: return SpannerException.extractRetryDelay(cause) > 0; default: @@ -345,5 +358,8 @@ private static class Matchers { static final Predicate isRetryableInternalError = new IsRetryableInternalError(); static final Predicate isSSLHandshakeException = new IsSslHandshakeException(); + + static final Predicate IS_CHANNEL_SHUTDOWN_EXCEPTION = + new IsChannelShutdownException(); } } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerImpl.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerImpl.java index 86b5de01c69..e5982cba0c8 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerImpl.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerImpl.java @@ -63,11 +63,12 @@ class SpannerImpl extends BaseService implements Spanner { final TraceWrapper tracer = new TraceWrapper( Tracing.getTracer(), - this.getOptions() + getOptions() .getOpenTelemetry() .getTracer( MetricRegistryConstants.INSTRUMENTATION_SCOPE, - GaxProperties.getLibraryVersion(this.getOptions().getClass()))); + 
GaxProperties.getLibraryVersion(this.getOptions().getClass())), + getOptions().isEnableExtendedTracing()); static final String CREATE_MULTIPLEXED_SESSION = "CloudSpannerOperation.CreateMultiplexedSession"; static final String CREATE_SESSION = "CloudSpannerOperation.CreateSession"; @@ -80,6 +81,8 @@ class SpannerImpl extends BaseService implements Spanner { static final String QUERY = "CloudSpannerOperation.ExecuteStreamingQuery"; static final String READ = "CloudSpannerOperation.ExecuteStreamingRead"; static final String BATCH_WRITE = "CloudSpannerOperation.BatchWrite"; + static final String UPDATE = "CloudSpannerOperation.ExecuteUpdate"; + static final String BATCH_UPDATE = "CloudSpannerOperation.BatchUpdate"; private static final Object CLIENT_ID_LOCK = new Object(); @@ -274,6 +277,9 @@ public DatabaseClient getDatabaseClient(DatabaseId db) { boolean useMultiplexedSession = getOptions().getSessionPoolOptions().getUseMultiplexedSession(); + boolean useMultiplexedSessionForRW = + getOptions().getSessionPoolOptions().getUseMultiplexedSessionForRW(); + MultiplexedSessionDatabaseClient multiplexedSessionDatabaseClient = useMultiplexedSession ? 
new MultiplexedSessionDatabaseClient(SpannerImpl.this.getSessionClient(db)) @@ -297,7 +303,11 @@ public DatabaseClient getDatabaseClient(DatabaseId db) { numMultiplexedSessionsReleased); pool.maybeWaitOnMinSessions(); DatabaseClientImpl dbClient = - createDatabaseClient(clientId, pool, multiplexedSessionDatabaseClient); + createDatabaseClient( + clientId, + pool, + getOptions().getSessionPoolOptions().getUseMultiplexedSessionBlindWrite(), + multiplexedSessionDatabaseClient); dbClients.put(db, dbClient); return dbClient; } @@ -308,8 +318,10 @@ public DatabaseClient getDatabaseClient(DatabaseId db) { DatabaseClientImpl createDatabaseClient( String clientId, SessionPool pool, + boolean useMultiplexedSessionBlindWrite, @Nullable MultiplexedSessionDatabaseClient multiplexedSessionClient) { - return new DatabaseClientImpl(clientId, pool, multiplexedSessionClient, tracer); + return new DatabaseClientImpl( + clientId, pool, useMultiplexedSessionBlindWrite, multiplexedSessionClient, tracer); } @Override diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerOptions.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerOptions.java index a16be179ce3..5756ff64b89 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerOptions.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerOptions.java @@ -21,12 +21,18 @@ import com.google.api.core.InternalApi; import com.google.api.core.ObsoleteApi; import com.google.api.gax.core.ExecutorProvider; +import com.google.api.gax.core.GaxProperties; import com.google.api.gax.grpc.GrpcCallContext; import com.google.api.gax.grpc.GrpcInterceptorProvider; import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; import com.google.api.gax.retrying.RetrySettings; import com.google.api.gax.rpc.ApiCallContext; import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.tracing.ApiTracerFactory; +import 
com.google.api.gax.tracing.BaseApiTracerFactory; +import com.google.api.gax.tracing.MetricsTracerFactory; +import com.google.api.gax.tracing.OpenTelemetryMetricsRecorder; +import com.google.api.gax.tracing.OpencensusTracerFactory; import com.google.cloud.NoCredentials; import com.google.cloud.ServiceDefaults; import com.google.cloud.ServiceOptions; @@ -64,10 +70,13 @@ import io.grpc.MethodDescriptor; import io.opentelemetry.api.GlobalOpenTelemetry; import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; import java.io.IOException; import java.net.MalformedURLException; import java.net.URL; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; @@ -99,7 +108,7 @@ public class SpannerOptions extends ServiceOptions { ImmutableSet.of( "https://www.googleapis.com/auth/spanner.admin", "https://www.googleapis.com/auth/spanner.data"); - private static final int MAX_CHANNELS = 256; + static final int MAX_CHANNELS = 256; @VisibleForTesting static final int DEFAULT_CHANNELS = 4; // Set the default number of channels to GRPC_GCP_ENABLED_DEFAULT_CHANNELS when gRPC-GCP extension // is enabled, to make sure there are sufficient channels available to move the sessions to a @@ -127,6 +136,8 @@ public class SpannerOptions extends ServiceOptions { private final boolean autoThrottleAdministrativeRequests; private final RetrySettings retryAdministrativeRequestsSettings; private final boolean trackTransactionStarter; + private final BuiltInOpenTelemetryMetricsProvider builtInOpenTelemetryMetricsProvider = + BuiltInOpenTelemetryMetricsProvider.INSTANCE; /** * These are the default {@link QueryOptions} defined by the user on this {@link SpannerOptions}. 
*/ @@ -149,6 +160,10 @@ public class SpannerOptions extends ServiceOptions { private final DirectedReadOptions directedReadOptions; private final boolean useVirtualThreads; private final OpenTelemetry openTelemetry; + private final boolean enableApiTracing; + private final boolean enableBuiltInMetrics; + private final boolean enableExtendedTracing; + private final boolean enableEndToEndTracing; enum TracingFramework { OPEN_CENSUS, @@ -653,6 +668,10 @@ protected SpannerOptions(Builder builder) { directedReadOptions = builder.directedReadOptions; useVirtualThreads = builder.useVirtualThreads; openTelemetry = builder.openTelemetry; + enableApiTracing = builder.enableApiTracing; + enableExtendedTracing = builder.enableExtendedTracing; + enableBuiltInMetrics = builder.enableBuiltInMetrics; + enableEndToEndTracing = builder.enableEndToEndTracing; } /** @@ -665,7 +684,9 @@ public interface SpannerEnvironment { * set. */ @Nonnull - String getOptimizerVersion(); + default String getOptimizerVersion() { + return ""; + } /** * The optimizer statistics package to use. 
Must return an empty string to indicate that no @@ -673,7 +694,23 @@ public interface SpannerEnvironment { */ @Nonnull default String getOptimizerStatisticsPackage() { - throw new UnsupportedOperationException("Unimplemented"); + return ""; + } + + default boolean isEnableExtendedTracing() { + return false; + } + + default boolean isEnableApiTracing() { + return false; + } + + default boolean isEnableBuiltInMetrics() { + return false; + } + + default boolean isEnableEndToEndTracing() { + return false; } } @@ -686,19 +723,48 @@ private static class SpannerEnvironmentImpl implements SpannerEnvironment { private static final String SPANNER_OPTIMIZER_VERSION_ENV_VAR = "SPANNER_OPTIMIZER_VERSION"; private static final String SPANNER_OPTIMIZER_STATISTICS_PACKAGE_ENV_VAR = "SPANNER_OPTIMIZER_STATISTICS_PACKAGE"; + private static final String SPANNER_ENABLE_EXTENDED_TRACING = "SPANNER_ENABLE_EXTENDED_TRACING"; + private static final String SPANNER_ENABLE_API_TRACING = "SPANNER_ENABLE_API_TRACING"; + private static final String SPANNER_ENABLE_BUILTIN_METRICS = "SPANNER_ENABLE_BUILTIN_METRICS"; + private static final String SPANNER_ENABLE_END_TO_END_TRACING = + "SPANNER_ENABLE_END_TO_END_TRACING"; private SpannerEnvironmentImpl() {} + @Nonnull @Override public String getOptimizerVersion() { return MoreObjects.firstNonNull(System.getenv(SPANNER_OPTIMIZER_VERSION_ENV_VAR), ""); } + @Nonnull @Override public String getOptimizerStatisticsPackage() { return MoreObjects.firstNonNull( System.getenv(SPANNER_OPTIMIZER_STATISTICS_PACKAGE_ENV_VAR), ""); } + + @Override + public boolean isEnableExtendedTracing() { + return Boolean.parseBoolean(System.getenv(SPANNER_ENABLE_EXTENDED_TRACING)); + } + + @Override + public boolean isEnableApiTracing() { + return Boolean.parseBoolean(System.getenv(SPANNER_ENABLE_API_TRACING)); + } + + @Override + public boolean isEnableBuiltInMetrics() { + // The environment variable SPANNER_ENABLE_BUILTIN_METRICS is used for testing and will be + // removed 
in the future. + return Boolean.parseBoolean(System.getenv(SPANNER_ENABLE_BUILTIN_METRICS)); + } + + @Override + public boolean isEnableEndToEndTracing() { + return Boolean.parseBoolean(System.getenv(SPANNER_ENABLE_END_TO_END_TRACING)); + } } /** Builder for {@link SpannerOptions} instances. */ @@ -706,9 +772,7 @@ public static class Builder extends ServiceOptions.Builder { static final int DEFAULT_PREFETCH_CHUNKS = 4; static final QueryOptions DEFAULT_QUERY_OPTIONS = QueryOptions.getDefaultInstance(); - // TODO: Set the default to DecodeMode.DIRECT before merging to keep the current default. - // It is currently set to LAZY_PER_COL so it is used in all tests. - static final DecodeMode DEFAULT_DECODE_MODE = DecodeMode.LAZY_PER_COL; + static final DecodeMode DEFAULT_DECODE_MODE = DecodeMode.DIRECT; static final RetrySettings DEFAULT_ADMIN_REQUESTS_LIMIT_EXCEEDED_RETRY_SETTINGS = RetrySettings.newBuilder() .setInitialRetryDelay(Duration.ofSeconds(5L)) @@ -762,6 +826,10 @@ public static class Builder private DirectedReadOptions directedReadOptions; private boolean useVirtualThreads = false; private OpenTelemetry openTelemetry; + private boolean enableApiTracing = SpannerOptions.environment.isEnableApiTracing(); + private boolean enableExtendedTracing = SpannerOptions.environment.isEnableExtendedTracing(); + private boolean enableBuiltInMetrics = SpannerOptions.environment.isEnableBuiltInMetrics(); + private boolean enableEndToEndTracing = SpannerOptions.environment.isEnableEndToEndTracing(); private static String createCustomClientLibToken(String token) { return token + " " + ServiceOptions.getGoogApiClientLibName(); @@ -825,6 +893,10 @@ protected Builder() { this.attemptDirectPath = options.attemptDirectPath; this.directedReadOptions = options.directedReadOptions; this.useVirtualThreads = options.useVirtualThreads; + this.enableApiTracing = options.enableApiTracing; + this.enableExtendedTracing = options.enableExtendedTracing; + this.enableBuiltInMetrics = 
options.enableBuiltInMetrics; + this.enableEndToEndTracing = options.enableEndToEndTracing; } @Override @@ -1248,14 +1320,20 @@ public Builder setHost(String host) { return this; } - /** Enables gRPC-GCP extension with the default settings. */ + /** + * Enables gRPC-GCP extension with the default settings. Do not set + * GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS to true in combination with this option, as + * Multiplexed sessions are not supported for gRPC-GCP. + */ public Builder enableGrpcGcpExtension() { return this.enableGrpcGcpExtension(null); } /** * Enables gRPC-GCP extension and uses provided options for configuration. The metric registry - * and default Spanner metric labels will be added automatically. + * and default Spanner metric labels will be added automatically. Do not set + * GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS to true in combination with this option, as + * Multiplexed sessions are not supported for gRPC-GCP. */ public Builder enableGrpcGcpExtension(GcpManagedChannelOptions options) { this.grpcGcpExtensionEnabled = true; @@ -1321,6 +1399,48 @@ protected Builder setUseVirtualThreads(boolean useVirtualThreads) { return this; } + /** + * Creates and sets an {@link com.google.api.gax.tracing.ApiTracer} for the RPCs that are + * executed by this client. Enabling this creates traces for each individual RPC execution, + * including events/annotations when an RPC is retried or fails. The traces are only exported if + * an OpenTelemetry or OpenCensus trace exporter has been configured for the client. + */ + public Builder setEnableApiTracing(boolean enableApiTracing) { + this.enableApiTracing = enableApiTracing; + return this; + } + + /** Enabling this will enable built in metrics for each individual RPC execution. */ + Builder setEnableBuiltInMetrics(boolean enableBuiltInMetrics) { + this.enableBuiltInMetrics = enableBuiltInMetrics; + return this; + } + + /** + * Sets whether to enable extended OpenTelemetry tracing. 
Enabling this option will add the + * following additional attributes to the traces that are generated by the client: + * + *

    + *
  • db.statement: Contains the SQL statement that is being executed. + *
  • thread.name: The name of the thread that executes the statement. + *
+ */ + public Builder setEnableExtendedTracing(boolean enableExtendedTracing) { + this.enableExtendedTracing = enableExtendedTracing; + return this; + } + + /** + * Sets whether to enable end to end tracing. Enabling this option will create the trace spans + * at the Spanner layer. By default, end to end tracing is disabled. Enabling end to end tracing + * requires OpenTelemetry to be set up. Simply enabling this option won't generate traces at + * Spanner layer. + */ + public Builder setEnableEndToEndTracing(boolean enableEndToEndTracing) { + this.enableEndToEndTracing = enableEndToEndTracing; + return this; + } + @SuppressWarnings("rawtypes") @Override public SpannerOptions build() { @@ -1410,6 +1530,7 @@ public static void enableOpenCensusTraces() { */ @ObsoleteApi( "The OpenCensus project is deprecated. Use enableOpenTelemetryTraces to switch to OpenTelemetry traces") + @VisibleForTesting static void resetActiveTracingFramework() { activeTracingFramework = null; } @@ -1558,11 +1679,107 @@ public OpenTelemetry getOpenTelemetry() { } } + @Override + public ApiTracerFactory getApiTracerFactory() { + return createApiTracerFactory(false, false); + } + + public ApiTracerFactory getApiTracerFactory(boolean isAdminClient, boolean isEmulatorEnabled) { + return createApiTracerFactory(isAdminClient, isEmulatorEnabled); + } + + private ApiTracerFactory createApiTracerFactory( + boolean isAdminClient, boolean isEmulatorEnabled) { + List apiTracerFactories = new ArrayList<>(); + // Prefer any direct ApiTracerFactory that might have been set on the builder. + apiTracerFactories.add( + MoreObjects.firstNonNull(super.getApiTracerFactory(), getDefaultApiTracerFactory())); + + // Add Metrics Tracer factory if built in metrics are enabled and if the client is data client + // and if emulator is not enabled. 
+ if (isEnableBuiltInMetrics() && !isAdminClient && !isEmulatorEnabled) { + ApiTracerFactory metricsTracerFactory = createMetricsApiTracerFactory(); + if (metricsTracerFactory != null) { + apiTracerFactories.add(metricsTracerFactory); + } + } + + return new CompositeTracerFactory(apiTracerFactories); + } + + private ApiTracerFactory getDefaultApiTracerFactory() { + if (isEnableApiTracing()) { + if (activeTracingFramework == TracingFramework.OPEN_TELEMETRY) { + return new OpenTelemetryApiTracerFactory( + getOpenTelemetry() + .getTracer( + MetricRegistryConstants.INSTRUMENTATION_SCOPE, + GaxProperties.getLibraryVersion(getClass())), + Attributes.empty()); + } else if (activeTracingFramework == TracingFramework.OPEN_CENSUS) { + return new OpencensusTracerFactory(); + } + } + return BaseApiTracerFactory.getInstance(); + } + + private ApiTracerFactory createMetricsApiTracerFactory() { + OpenTelemetry openTelemetry = + this.builtInOpenTelemetryMetricsProvider.getOrCreateOpenTelemetry( + getDefaultProjectId(), getCredentials()); + + return openTelemetry != null + ? new MetricsTracerFactory( + new OpenTelemetryMetricsRecorder(openTelemetry, BuiltInMetricsConstant.METER_NAME), + builtInOpenTelemetryMetricsProvider.createClientAttributes( + getDefaultProjectId(), + "spanner-java/" + GaxProperties.getLibraryVersion(getClass()))) + : null; + } + + /** + * Returns true if an {@link com.google.api.gax.tracing.ApiTracer} should be created and set on + * the Spanner client. Enabling this only has effect if an OpenTelemetry or OpenCensus trace + * exporter has been configured. + */ + public boolean isEnableApiTracing() { + return enableApiTracing; + } + + /** + * Returns true if an {@link com.google.api.gax.tracing.MetricsTracer} should be created and set + * on the Spanner client. 
+ */ + boolean isEnableBuiltInMetrics() { + return enableBuiltInMetrics; + } + @BetaApi public boolean isUseVirtualThreads() { return useVirtualThreads; } + /** + * Returns whether extended OpenTelemetry tracing is enabled. Enabling this option will add the + * following additional attributes to the traces that are generated by the client: + * + *
    + *
  • db.statement: Contains the SQL statement that is being executed. + *
  • thread.name: The name of the thread that executes the statement. + *
+ */ + public boolean isEnableExtendedTracing() { + return enableExtendedTracing; + } + + /** + * Returns whether end to end tracing is enabled. If this option is enabled then trace spans will + * be created at the Spanner layer. + */ + public boolean isEndToEndTracingEnabled() { + return enableEndToEndTracing; + } + /** Returns the default query options to use for the specific database. */ public QueryOptions getDefaultQueryOptions(DatabaseId databaseId) { // Use the specific query options for the database if any have been specified. These have diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerRetryHelper.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerRetryHelper.java index 9c49efe2f11..a25c706e8a7 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerRetryHelper.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerRetryHelper.java @@ -23,6 +23,7 @@ import com.google.api.gax.retrying.TimedAttemptSettings; import com.google.cloud.RetryHelper; import com.google.cloud.RetryHelper.RetryHelperException; +import com.google.cloud.spanner.ErrorHandler.DefaultErrorHandler; import com.google.cloud.spanner.v1.stub.SpannerStub; import com.google.cloud.spanner.v1.stub.SpannerStubSettings; import com.google.common.annotations.VisibleForTesting; @@ -65,7 +66,12 @@ class SpannerRetryHelper { /** Executes the {@link Callable} and retries if it fails with an {@link AbortedException}. 
*/ static T runTxWithRetriesOnAborted(Callable callable) { - return runTxWithRetriesOnAborted(callable, txRetrySettings, NanoClock.getDefaultClock()); + return runTxWithRetriesOnAborted(callable, DefaultErrorHandler.INSTANCE); + } + + static T runTxWithRetriesOnAborted(Callable callable, ErrorHandler errorHandler) { + return runTxWithRetriesOnAborted( + callable, errorHandler, txRetrySettings, NanoClock.getDefaultClock()); } /** @@ -75,11 +81,20 @@ static T runTxWithRetriesOnAborted(Callable callable) { @VisibleForTesting static T runTxWithRetriesOnAborted( Callable callable, RetrySettings retrySettings, ApiClock clock) { + return runTxWithRetriesOnAborted(callable, DefaultErrorHandler.INSTANCE, retrySettings, clock); + } + + @VisibleForTesting + static T runTxWithRetriesOnAborted( + Callable callable, + ErrorHandler errorHandler, + RetrySettings retrySettings, + ApiClock clock) { try { return RetryHelper.runWithRetries(callable, retrySettings, new TxRetryAlgorithm<>(), clock); } catch (RetryHelperException e) { if (e.getCause() != null) { - Throwables.throwIfUnchecked(e.getCause()); + Throwables.throwIfUnchecked(errorHandler.translateException(e.getCause())); } throw e; } @@ -107,9 +122,8 @@ public boolean shouldRetry(Throwable prevThrowable, T prevResponse) if (Context.current().isCancelled()) { throw SpannerExceptionFactory.newSpannerExceptionForCancellation(Context.current(), null); } - return prevThrowable != null - && (prevThrowable instanceof AbortedException - || prevThrowable instanceof com.google.api.gax.rpc.AbortedException); + return prevThrowable instanceof AbortedException + || prevThrowable instanceof com.google.api.gax.rpc.AbortedException; } } } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TraceWrapper.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TraceWrapper.java index 25796968e9e..02638445ae2 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TraceWrapper.java +++ 
b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TraceWrapper.java @@ -16,48 +16,78 @@ package com.google.cloud.spanner; +import com.google.cloud.spanner.Options.TagOption; +import com.google.cloud.spanner.Options.TransactionOption; import com.google.cloud.spanner.SpannerOptions.TracingFramework; +import com.google.common.base.MoreObjects; import io.opencensus.trace.BlankSpan; import io.opencensus.trace.Span; import io.opencensus.trace.Tracer; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; import io.opentelemetry.context.Context; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; class TraceWrapper { + private static final AttributeKey TRANSACTION_TAG_KEY = + AttributeKey.stringKey("transaction.tag"); + private static final AttributeKey STATEMENT_TAG_KEY = + AttributeKey.stringKey("statement.tag"); + private static final AttributeKey DB_STATEMENT_KEY = + AttributeKey.stringKey("db.statement"); + private static final AttributeKey> DB_STATEMENT_ARRAY_KEY = + AttributeKey.stringArrayKey("db.statement"); + private static final AttributeKey THREAD_NAME_KEY = AttributeKey.stringKey("thread.name"); private final Tracer openCensusTracer; private final io.opentelemetry.api.trace.Tracer openTelemetryTracer; + private final boolean enableExtendedTracing; - TraceWrapper(Tracer openCensusTracer, io.opentelemetry.api.trace.Tracer openTelemetryTracer) { + TraceWrapper( + Tracer openCensusTracer, + io.opentelemetry.api.trace.Tracer openTelemetryTracer, + boolean enableExtendedTracing) { this.openTelemetryTracer = openTelemetryTracer; this.openCensusTracer = openCensusTracer; + this.enableExtendedTracing = enableExtendedTracing; } ISpan spanBuilder(String spanName) { + return spanBuilder(spanName, Attributes.empty()); + } + + ISpan spanBuilder(String spanName, 
TransactionOption... options) { + return spanBuilder(spanName, createTransactionAttributes(options)); + } + + ISpan spanBuilder(String spanName, Attributes attributes) { if (SpannerOptions.getActiveTracingFramework().equals(TracingFramework.OPEN_TELEMETRY)) { - return new OpenTelemetrySpan(openTelemetryTracer.spanBuilder(spanName).startSpan()); + return new OpenTelemetrySpan( + openTelemetryTracer.spanBuilder(spanName).setAllAttributes(attributes).startSpan()); } else { return new OpenCensusSpan(openCensusTracer.spanBuilder(spanName).startSpan()); } } ISpan spanBuilderWithExplicitParent(String spanName, ISpan parentSpan) { + return spanBuilderWithExplicitParent(spanName, parentSpan, Attributes.empty()); + } + + ISpan spanBuilderWithExplicitParent(String spanName, ISpan parentSpan, Attributes attributes) { if (SpannerOptions.getActiveTracingFramework().equals(TracingFramework.OPEN_TELEMETRY)) { OpenTelemetrySpan otParentSpan = (OpenTelemetrySpan) parentSpan; - io.opentelemetry.api.trace.Span otSpan; - + io.opentelemetry.api.trace.SpanBuilder otSpan = + openTelemetryTracer.spanBuilder(spanName).setAllAttributes(attributes); if (otParentSpan != null && otParentSpan.getOpenTelemetrySpan() != null) { - otSpan = - openTelemetryTracer - .spanBuilder(spanName) - .setParent(Context.current().with(otParentSpan.getOpenTelemetrySpan())) - .startSpan(); - } else { - otSpan = openTelemetryTracer.spanBuilder(spanName).startSpan(); + otSpan = otSpan.setParent(Context.current().with(otParentSpan.getOpenTelemetrySpan())); } - - return new OpenTelemetrySpan(otSpan); - + return new OpenTelemetrySpan(otSpan.startSpan()); } else { OpenCensusSpan parentOcSpan = (OpenCensusSpan) parentSpan; Span ocSpan = @@ -106,4 +136,58 @@ IScope withSpan(ISpan span) { return new OpenCensusScope(openCensusTracer.withSpan(openCensusSpan.getOpenCensusSpan())); } } + + Attributes createTransactionAttributes(TransactionOption... 
options) { + if (options != null && options.length > 0) { + Optional tagOption = + Arrays.stream(options) + .filter(option -> option instanceof TagOption) + .map(option -> (TagOption) option) + .findAny(); + if (tagOption.isPresent()) { + return Attributes.of(TRANSACTION_TAG_KEY, tagOption.get().getTag()); + } + } + return Attributes.empty(); + } + + Attributes createStatementAttributes(Statement statement, Options options) { + if (this.enableExtendedTracing || (options != null && options.hasTag())) { + AttributesBuilder builder = Attributes.builder(); + if (this.enableExtendedTracing) { + builder.put(DB_STATEMENT_KEY, statement.getSql()); + builder.put(THREAD_NAME_KEY, getTraceThreadName()); + } + if (options != null && options.hasTag()) { + builder.put(STATEMENT_TAG_KEY, options.tag()); + } + return builder.build(); + } + return Attributes.empty(); + } + + Attributes createStatementBatchAttributes(Iterable statements, Options options) { + if (this.enableExtendedTracing || (options != null && options.hasTag())) { + AttributesBuilder builder = Attributes.builder(); + if (this.enableExtendedTracing) { + builder.put( + DB_STATEMENT_ARRAY_KEY, + StreamSupport.stream(statements.spliterator(), false) + .map(Statement::getSql) + .collect(Collectors.toList())); + builder.put(THREAD_NAME_KEY, getTraceThreadName()); + } + if (options != null && options.hasTag()) { + builder.put(STATEMENT_TAG_KEY, options.tag()); + } + return builder.build(); + } + return Attributes.empty(); + } + + private static String getTraceThreadName() { + return MoreObjects.firstNonNull( + Context.current().get(OpenTelemetryContextKeys.THREAD_NAME_KEY), + Thread.currentThread().getName()); + } } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java index 4deeeb92af8..c8bf6dc833b 100644 --- 
a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java @@ -18,6 +18,8 @@ import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerBatchUpdateException; import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; +import static com.google.cloud.spanner.SpannerImpl.BATCH_UPDATE; +import static com.google.cloud.spanner.SpannerImpl.UPDATE; import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.base.Preconditions.checkState; @@ -216,6 +218,11 @@ private TransactionContextImpl(Builder builder) { session.getOptions(), ThreadLocalRandom.current().nextLong(Long.MAX_VALUE)); } + @Override + protected boolean isReadOnly() { + return false; + } + @Override protected boolean isRouteToLeader() { return true; @@ -275,7 +282,8 @@ ApiFuture ensureTxnAsync() { private void createTxnAsync(final SettableApiFuture res) { span.addAnnotation("Creating Transaction"); - final ApiFuture fut = session.beginTransactionAsync(options, isRouteToLeader()); + final ApiFuture fut = + session.beginTransactionAsync(options, isRouteToLeader(), getTransactionChannelHint()); fut.addListener( () -> { try { @@ -299,12 +307,23 @@ private void createTxnAsync(final SettableApiFuture res) { void commit() { try { - commitResponse = commitAsync().get(); - } catch (InterruptedException e) { + // Normally, Gax will take care of any timeouts, but we add a timeout for getting the value + // from the future here as well to make sure the call always finishes, even if the future + // never resolves. 
+ commitResponse = + commitAsync() + .get( + rpc.getCommitRetrySettings().getTotalTimeout().getSeconds() + 5, + TimeUnit.SECONDS); + } catch (InterruptedException | TimeoutException e) { if (commitFuture != null) { commitFuture.cancel(true); } - throw SpannerExceptionFactory.propagateInterrupt(e); + if (e instanceof InterruptedException) { + throw SpannerExceptionFactory.propagateInterrupt((InterruptedException) e); + } else { + throw SpannerExceptionFactory.propagateTimeout((TimeoutException) e); + } } catch (ExecutionException e) { throw SpannerExceptionFactory.newSpannerException(e.getCause() == null ? e : e.getCause()); } @@ -406,13 +425,23 @@ public void run() { } final CommitRequest commitRequest = requestBuilder.build(); span.addAnnotation("Starting Commit"); + final ApiFuture commitFuture; final ISpan opSpan = tracer.spanBuilderWithExplicitParent(SpannerImpl.COMMIT, span); - final ApiFuture commitFuture = - rpc.commitAsync(commitRequest, session.getOptions()); + try (IScope ignore = tracer.withSpan(opSpan)) { + commitFuture = rpc.commitAsync(commitRequest, getTransactionChannelHint()); + } session.markUsed(clock.instant()); commitFuture.addListener( () -> { - try (IScope s = tracer.withSpan(opSpan)) { + try (IScope ignore = tracer.withSpan(opSpan)) { + if (!commitFuture.isDone()) { + // This should not be possible, considering that we are in a listener for the + // future, but we add a result here as well as a safety precaution. + res.setException( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INTERNAL, "commitFuture is not done")); + return; + } com.google.spanner.v1.CommitResponse proto = commitFuture.get(); if (!proto.hasCommitTimestamp()) { throw newSpannerException( @@ -421,20 +450,28 @@ public void run() { span.addAnnotation("Commit Done"); opSpan.end(); res.set(new CommitResponse(proto)); - } catch (Throwable e) { - if (e instanceof ExecutionException) { - e = - SpannerExceptionFactory.newSpannerException( - e.getCause() == null ? 
e : e.getCause()); - } else if (e instanceof InterruptedException) { - e = SpannerExceptionFactory.propagateInterrupt((InterruptedException) e); - } else { - e = SpannerExceptionFactory.newSpannerException(e); + } catch (Throwable throwable) { + SpannerException resultException; + try { + if (throwable instanceof ExecutionException) { + resultException = + SpannerExceptionFactory.asSpannerException( + throwable.getCause() == null ? throwable : throwable.getCause()); + } else if (throwable instanceof InterruptedException) { + resultException = + SpannerExceptionFactory.propagateInterrupt( + (InterruptedException) throwable); + } else { + resultException = SpannerExceptionFactory.asSpannerException(throwable); + } + span.addAnnotation("Commit Failed", resultException); + opSpan.setStatus(resultException); + opSpan.end(); + res.setException(onError(resultException, false)); + } catch (Throwable unexpectedError) { + // This is a safety precaution to make sure that a result is always returned. + res.setException(unexpectedError); } - span.addAnnotation("Commit Failed", e); - opSpan.setStatus(e); - opSpan.end(); - res.setException(onError((SpannerException) e, false)); } }, MoreExecutors.directExecutor()); @@ -442,9 +479,6 @@ public void run() { res.setException(SpannerExceptionFactory.propagateInterrupt(e)); } catch (TimeoutException e) { res.setException(SpannerExceptionFactory.propagateTimeout(e)); - } catch (ExecutionException e) { - res.setException( - SpannerExceptionFactory.newSpannerException(e.getCause() == null ? e : e.getCause())); } catch (Throwable e) { res.setException( SpannerExceptionFactory.newSpannerException(e.getCause() == null ? 
e : e.getCause())); @@ -492,7 +526,7 @@ ApiFuture rollbackAsync() { .setSession(session.getName()) .setTransactionId(transactionId) .build(), - session.getOptions()); + getTransactionChannelHint()); session.markUsed(clock.instant()); return apiFuture; } else { @@ -722,7 +756,7 @@ public com.google.cloud.spanner.ResultSet analyzeUpdateStatement( } private ResultSet internalAnalyzeStatement( - Statement statement, QueryAnalyzeMode analyzeMode, UpdateOption... options) { + Statement statement, QueryAnalyzeMode analyzeMode, UpdateOption... updateOptions) { Preconditions.checkNotNull(analyzeMode); QueryMode queryMode; switch (analyzeMode) { @@ -736,20 +770,28 @@ private ResultSet internalAnalyzeStatement( throw SpannerExceptionFactory.newSpannerException( ErrorCode.INVALID_ARGUMENT, "Unknown analyze mode: " + analyzeMode); } + final Options options = Options.fromUpdateOptions(updateOptions); return internalExecuteUpdate(statement, queryMode, options); } @Override - public long executeUpdate(Statement statement, UpdateOption... options) { - ResultSet resultSet = internalExecuteUpdate(statement, QueryMode.NORMAL, options); - // For standard DML, using the exact row count. - return resultSet.getStats().getRowCountExact(); + public long executeUpdate(Statement statement, UpdateOption... updateOptions) { + final Options options = Options.fromUpdateOptions(updateOptions); + ISpan span = + tracer.spanBuilderWithExplicitParent( + UPDATE, this.span, this.tracer.createStatementAttributes(statement, options)); + try (IScope ignore = tracer.withSpan(span)) { + ResultSet resultSet = internalExecuteUpdate(statement, QueryMode.NORMAL, options); + // For standard DML, using the exact row count. + return resultSet.getStats().getRowCountExact(); + } finally { + span.end(); + } } private ResultSet internalExecuteUpdate( - Statement statement, QueryMode queryMode, UpdateOption... 
updateOptions) { + Statement statement, QueryMode queryMode, Options options) { beforeReadOrQuery(); - final Options options = Options.fromUpdateOptions(updateOptions); if (options.withExcludeTxnFromChangeStreams() != null) { throw newSpannerException( ErrorCode.INVALID_ARGUMENT, DML_INVALID_EXCLUDE_CHANGE_STREAMS_OPTION_MESSAGE); @@ -759,7 +801,7 @@ private ResultSet internalExecuteUpdate( statement, queryMode, options, /* withTransactionSelector = */ true); try { com.google.spanner.v1.ResultSet resultSet = - rpc.executeQuery(builder.build(), session.getOptions(), isRouteToLeader()); + rpc.executeQuery(builder.build(), getTransactionChannelHint(), isRouteToLeader()); session.markUsed(clock.instant()); if (resultSet.getMetadata().hasTransaction()) { onTransactionMetadata( @@ -778,70 +820,81 @@ private ResultSet internalExecuteUpdate( @Override public ApiFuture executeUpdateAsync(Statement statement, UpdateOption... updateOptions) { - beforeReadOrQuery(); final Options options = Options.fromUpdateOptions(updateOptions); - if (options.withExcludeTxnFromChangeStreams() != null) { - throw newSpannerException( - ErrorCode.INVALID_ARGUMENT, DML_INVALID_EXCLUDE_CHANGE_STREAMS_OPTION_MESSAGE); - } - final ExecuteSqlRequest.Builder builder = - getExecuteSqlRequestBuilder( - statement, QueryMode.NORMAL, options, /* withTransactionSelector = */ true); - final ApiFuture resultSet; - try { - // Register the update as an async operation that must finish before the transaction may - // commit. 
- increaseAsyncOperations(); - resultSet = rpc.executeQueryAsync(builder.build(), session.getOptions(), isRouteToLeader()); - session.markUsed(clock.instant()); - } catch (Throwable t) { - decreaseAsyncOperations(); - throw t; - } - ApiFuture updateCount = - ApiFutures.transform( - resultSet, - input -> { - if (!input.hasStats()) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.INVALID_ARGUMENT, - "DML response missing stats possibly due to non-DML statement as input"); - } - if (builder.getTransaction().hasBegin() - && !(input.getMetadata().hasTransaction() - && input.getMetadata().getTransaction().getId() != ByteString.EMPTY)) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.FAILED_PRECONDITION, NO_TRANSACTION_RETURNED_MSG); + ISpan span = + tracer.spanBuilderWithExplicitParent( + UPDATE, this.span, this.tracer.createStatementAttributes(statement, options)); + try (IScope ignore = tracer.withSpan(span)) { + beforeReadOrQuery(); + if (options.withExcludeTxnFromChangeStreams() != null) { + throw newSpannerException( + ErrorCode.INVALID_ARGUMENT, DML_INVALID_EXCLUDE_CHANGE_STREAMS_OPTION_MESSAGE); + } + final ExecuteSqlRequest.Builder builder = + getExecuteSqlRequestBuilder( + statement, QueryMode.NORMAL, options, /* withTransactionSelector = */ true); + final ApiFuture resultSet; + try { + // Register the update as an async operation that must finish before the transaction may + // commit. 
+ increaseAsyncOperations(); + resultSet = + rpc.executeQueryAsync( + builder.build(), getTransactionChannelHint(), isRouteToLeader()); + session.markUsed(clock.instant()); + } catch (Throwable t) { + decreaseAsyncOperations(); + throw t; + } + ApiFuture updateCount = + ApiFutures.transform( + resultSet, + input -> { + if (!input.hasStats()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "DML response missing stats possibly due to non-DML statement as input"); + } + if (builder.getTransaction().hasBegin() + && !(input.getMetadata().hasTransaction() + && input.getMetadata().getTransaction().getId() != ByteString.EMPTY)) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, NO_TRANSACTION_RETURNED_MSG); + } + // For standard DML, using the exact row count. + return input.getStats().getRowCountExact(); + }, + MoreExecutors.directExecutor()); + updateCount = + ApiFutures.catching( + updateCount, + Throwable.class, + input -> { + SpannerException e = SpannerExceptionFactory.asSpannerException(input); + SpannerException exceptionToThrow = + onError(e, builder.getTransaction().hasBegin()); + span.setStatus(exceptionToThrow); + throw exceptionToThrow; + }, + MoreExecutors.directExecutor()); + updateCount.addListener( + () -> { + try { + if (resultSet.get().getMetadata().hasTransaction()) { + onTransactionMetadata( + resultSet.get().getMetadata().getTransaction(), + builder.getTransaction().hasBegin()); } - // For standard DML, using the exact row count. 
- return input.getStats().getRowCountExact(); - }, - MoreExecutors.directExecutor()); - updateCount = - ApiFutures.catching( - updateCount, - Throwable.class, - input -> { - SpannerException e = SpannerExceptionFactory.asSpannerException(input); - throw onError(e, builder.getTransaction().hasBegin()); - }, - MoreExecutors.directExecutor()); - updateCount.addListener( - () -> { - try { - if (resultSet.get().getMetadata().hasTransaction()) { - onTransactionMetadata( - resultSet.get().getMetadata().getTransaction(), - builder.getTransaction().hasBegin()); + } catch (Throwable e) { + // Ignore this error here as it is handled by the future that is returned by the + // executeUpdateAsync method. } - } catch (Throwable e) { - // Ignore this error here as it is handled by the future that is returned by the - // executeUpdateAsync method. - } - decreaseAsyncOperations(); - }, - MoreExecutors.directExecutor()); - return updateCount; + span.end(); + decreaseAsyncOperations(); + }, + MoreExecutors.directExecutor()); + return updateCount; + } } private SpannerException createAbortedExceptionForBatchDml(ExecuteBatchDmlResponse response) { @@ -859,104 +912,131 @@ private SpannerException createAbortedExceptionForBatchDml(ExecuteBatchDmlRespon @Override public long[] batchUpdate(Iterable statements, UpdateOption... 
updateOptions) { - beforeReadOrQuery(); final Options options = Options.fromUpdateOptions(updateOptions); - if (options.withExcludeTxnFromChangeStreams() != null) { - throw newSpannerException( - ErrorCode.INVALID_ARGUMENT, DML_INVALID_EXCLUDE_CHANGE_STREAMS_OPTION_MESSAGE); - } - final ExecuteBatchDmlRequest.Builder builder = - getExecuteBatchDmlRequestBuilder(statements, options); - try { - com.google.spanner.v1.ExecuteBatchDmlResponse response = - rpc.executeBatchDml(builder.build(), session.getOptions()); - session.markUsed(clock.instant()); - long[] results = new long[response.getResultSetsCount()]; - for (int i = 0; i < response.getResultSetsCount(); ++i) { - results[i] = response.getResultSets(i).getStats().getRowCountExact(); - if (response.getResultSets(i).getMetadata().hasTransaction()) { - onTransactionMetadata( - response.getResultSets(i).getMetadata().getTransaction(), - builder.getTransaction().hasBegin()); - } + ISpan span = + tracer.spanBuilderWithExplicitParent( + BATCH_UPDATE, + this.span, + this.tracer.createStatementBatchAttributes(statements, options)); + try (IScope ignore = tracer.withSpan(span)) { + beforeReadOrQuery(); + if (options.withExcludeTxnFromChangeStreams() != null) { + throw newSpannerException( + ErrorCode.INVALID_ARGUMENT, DML_INVALID_EXCLUDE_CHANGE_STREAMS_OPTION_MESSAGE); } + final ExecuteBatchDmlRequest.Builder builder = + getExecuteBatchDmlRequestBuilder(statements, options); + try { + com.google.spanner.v1.ExecuteBatchDmlResponse response = + rpc.executeBatchDml(builder.build(), getTransactionChannelHint()); + session.markUsed(clock.instant()); + long[] results = new long[response.getResultSetsCount()]; + for (int i = 0; i < response.getResultSetsCount(); ++i) { + results[i] = response.getResultSets(i).getStats().getRowCountExact(); + if (response.getResultSets(i).getMetadata().hasTransaction()) { + onTransactionMetadata( + response.getResultSets(i).getMetadata().getTransaction(), + builder.getTransaction().hasBegin()); + } 
+ } - // If one of the DML statements was aborted, we should throw an aborted exception. - // In all other cases, we should throw a BatchUpdateException. - if (response.getStatus().getCode() == Code.ABORTED_VALUE) { - throw createAbortedExceptionForBatchDml(response); - } else if (response.getStatus().getCode() != 0) { - throw newSpannerBatchUpdateException( - ErrorCode.fromRpcStatus(response.getStatus()), - response.getStatus().getMessage(), - results); + // If one of the DML statements was aborted, we should throw an aborted exception. + // In all other cases, we should throw a BatchUpdateException. + if (response.getStatus().getCode() == Code.ABORTED_VALUE) { + throw createAbortedExceptionForBatchDml(response); + } else if (response.getStatus().getCode() != 0) { + throw newSpannerBatchUpdateException( + ErrorCode.fromRpcStatus(response.getStatus()), + response.getStatus().getMessage(), + results); + } + return results; + } catch (Throwable e) { + throw onError( + SpannerExceptionFactory.asSpannerException(e), builder.getTransaction().hasBegin()); } - return results; - } catch (Throwable e) { - throw onError( - SpannerExceptionFactory.asSpannerException(e), builder.getTransaction().hasBegin()); + } catch (Throwable throwable) { + span.setStatus(throwable); + throw throwable; + } finally { + span.end(); } } @Override public ApiFuture batchUpdateAsync( Iterable statements, UpdateOption... updateOptions) { - beforeReadOrQuery(); final Options options = Options.fromUpdateOptions(updateOptions); - if (options.withExcludeTxnFromChangeStreams() != null) { - throw newSpannerException( - ErrorCode.INVALID_ARGUMENT, DML_INVALID_EXCLUDE_CHANGE_STREAMS_OPTION_MESSAGE); - } - final ExecuteBatchDmlRequest.Builder builder = - getExecuteBatchDmlRequestBuilder(statements, options); - ApiFuture response; - try { - // Register the update as an async operation that must finish before the transaction may - // commit. 
- increaseAsyncOperations(); - response = rpc.executeBatchDmlAsync(builder.build(), session.getOptions()); - session.markUsed(clock.instant()); - } catch (Throwable t) { - decreaseAsyncOperations(); - throw t; - } - ApiFuture updateCounts = - ApiFutures.transform( - response, - batchDmlResponse -> { - long[] results = new long[batchDmlResponse.getResultSetsCount()]; - for (int i = 0; i < batchDmlResponse.getResultSetsCount(); ++i) { - results[i] = batchDmlResponse.getResultSets(i).getStats().getRowCountExact(); - if (batchDmlResponse.getResultSets(i).getMetadata().hasTransaction()) { - onTransactionMetadata( - batchDmlResponse.getResultSets(i).getMetadata().getTransaction(), - builder.getTransaction().hasBegin()); + ISpan span = + tracer.spanBuilderWithExplicitParent( + BATCH_UPDATE, + this.span, + this.tracer.createStatementBatchAttributes(statements, options)); + try (IScope ignore = tracer.withSpan(span)) { + beforeReadOrQuery(); + if (options.withExcludeTxnFromChangeStreams() != null) { + throw newSpannerException( + ErrorCode.INVALID_ARGUMENT, DML_INVALID_EXCLUDE_CHANGE_STREAMS_OPTION_MESSAGE); + } + final ExecuteBatchDmlRequest.Builder builder = + getExecuteBatchDmlRequestBuilder(statements, options); + ApiFuture response; + try { + // Register the update as an async operation that must finish before the transaction may + // commit. 
+ increaseAsyncOperations(); + response = rpc.executeBatchDmlAsync(builder.build(), getTransactionChannelHint()); + session.markUsed(clock.instant()); + } catch (Throwable t) { + decreaseAsyncOperations(); + throw t; + } + ApiFuture updateCounts = + ApiFutures.transform( + response, + batchDmlResponse -> { + long[] results = new long[batchDmlResponse.getResultSetsCount()]; + for (int i = 0; i < batchDmlResponse.getResultSetsCount(); ++i) { + results[i] = batchDmlResponse.getResultSets(i).getStats().getRowCountExact(); + if (batchDmlResponse.getResultSets(i).getMetadata().hasTransaction()) { + onTransactionMetadata( + batchDmlResponse.getResultSets(i).getMetadata().getTransaction(), + builder.getTransaction().hasBegin()); + } } - } - // If one of the DML statements was aborted, we should throw an aborted exception. - // In all other cases, we should throw a BatchUpdateException. - if (batchDmlResponse.getStatus().getCode() == Code.ABORTED_VALUE) { - throw createAbortedExceptionForBatchDml(batchDmlResponse); - } else if (batchDmlResponse.getStatus().getCode() != 0) { - throw newSpannerBatchUpdateException( - ErrorCode.fromRpcStatus(batchDmlResponse.getStatus()), - batchDmlResponse.getStatus().getMessage(), - results); - } - return results; - }, - MoreExecutors.directExecutor()); - updateCounts = - ApiFutures.catching( - updateCounts, - Throwable.class, - input -> { - SpannerException e = SpannerExceptionFactory.asSpannerException(input); - throw onError(e, builder.getTransaction().hasBegin()); - }, - MoreExecutors.directExecutor()); - updateCounts.addListener(this::decreaseAsyncOperations, MoreExecutors.directExecutor()); - return updateCounts; + // If one of the DML statements was aborted, we should throw an aborted exception. + // In all other cases, we should throw a BatchUpdateException. 
+ if (batchDmlResponse.getStatus().getCode() == Code.ABORTED_VALUE) { + throw createAbortedExceptionForBatchDml(batchDmlResponse); + } else if (batchDmlResponse.getStatus().getCode() != 0) { + throw newSpannerBatchUpdateException( + ErrorCode.fromRpcStatus(batchDmlResponse.getStatus()), + batchDmlResponse.getStatus().getMessage(), + results); + } + return results; + }, + MoreExecutors.directExecutor()); + updateCounts = + ApiFutures.catching( + updateCounts, + Throwable.class, + input -> { + SpannerException e = SpannerExceptionFactory.asSpannerException(input); + SpannerException exceptionToThrow = + onError(e, builder.getTransaction().hasBegin()); + span.setStatus(exceptionToThrow); + throw exceptionToThrow; + }, + MoreExecutors.directExecutor()); + updateCounts.addListener( + () -> { + span.end(); + decreaseAsyncOperations(); + }, + MoreExecutors.directExecutor()); + return updateCounts; + } } private ListenableAsyncResultSet wrap(ListenableAsyncResultSet delegate) { @@ -1102,7 +1182,7 @@ private T runInternal(final TransactionCallable txCallable) { throw e; } }; - return SpannerRetryHelper.runTxWithRetriesOnAborted(retryCallable); + return SpannerRetryHelper.runTxWithRetriesOnAborted(retryCallable, session.getErrorHandler()); } @Override diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Type.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Type.java index 4d93a9bfb02..748cb7f87ec 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Type.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/Type.java @@ -147,7 +147,7 @@ public static Type pgOid() { /** * To get the descriptor for the {@code PROTO} type. * - * @param protoTypeFqn Proto fully qualified name (ex: "spanner.examples.music.SingerInfo"). + * @param protoTypeFqn Proto fully qualified name (ex: "examples.spanner.music.SingerInfo"). 
*/ public static Type proto(String protoTypeFqn) { return new Type(Code.PROTO, protoTypeFqn); @@ -156,7 +156,7 @@ public static Type proto(String protoTypeFqn) { /** * To get the descriptor for the {@code ENUM} type. * - * @param protoTypeFqn Proto ENUM fully qualified name (ex: "spanner.examples.music.Genre") + * @param protoTypeFqn Proto ENUM fully qualified name (ex: "examples.spanner.music.Genre") */ public static Type protoEnum(String protoTypeFqn) { return new Type(Code.ENUM, protoTypeFqn); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClient.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClient.java index a1ed958be4b..dd00f6750c7 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClient.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClient.java @@ -43,24 +43,31 @@ import com.google.protobuf.Timestamp; import com.google.spanner.admin.database.v1.Backup; import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.BackupScheduleName; import com.google.spanner.admin.database.v1.CopyBackupMetadata; import com.google.spanner.admin.database.v1.CopyBackupRequest; import com.google.spanner.admin.database.v1.CreateBackupMetadata; import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; import com.google.spanner.admin.database.v1.CreateDatabaseRequest; import com.google.spanner.admin.database.v1.Database; import com.google.spanner.admin.database.v1.DatabaseName; import com.google.spanner.admin.database.v1.DatabaseRole; import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import 
com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; import com.google.spanner.admin.database.v1.DropDatabaseRequest; import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.GetDatabaseRequest; import com.google.spanner.admin.database.v1.InstanceName; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; import com.google.spanner.admin.database.v1.ListBackupsRequest; import com.google.spanner.admin.database.v1.ListBackupsResponse; import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; @@ -72,6 +79,7 @@ import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; @@ -86,8 +94,8 @@ * Service Description: Cloud Spanner Database Admin API * *

The Cloud Spanner Database Admin API can be used to: * create, drop, and list databases - * * update the schema of pre-existing databases * create, delete and list backups for a - * database * restore a database from an existing backup + * * update the schema of pre-existing databases * create, delete, copy and list backups for + * a database * restore a database from an existing backup * *

This class provides the ability to make remote calls to the backing service through method * calls that map to API methods. Sample code to get started: @@ -199,7 +207,7 @@ * * *

UpdateDatabaseDdl - *

Updates the schema of a Cloud Spanner database by creating/altering/dropping tables, columns, indexes, etc. The returned [long-running operation][google.longrunning.Operation] will have a name of the format `<database_name>/operations/<operation_id>` and can be used to track execution of the schema change(s). The [metadata][google.longrunning.Operation.metadata] field type is [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. + *

Updates the schema of a Cloud Spanner database by creating/altering/dropping tables, columns, indexes, etc. The returned [long-running operation][google.longrunning.Operation] will have a name of the format `<database_name>/operations/<operation_id>` and can be used to track execution of the schema change(s). The [metadata][google.longrunning.Operation.metadata] field type is [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. * *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

*
    @@ -337,7 +345,7 @@ * * *

    CopyBackup - *

    Starts copying a Cloud Spanner Backup. The returned backup [long-running operation][google.longrunning.Operation] will have a name of the format `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` and can be used to track copying of the backup. The operation is associated with the destination backup. The [metadata][google.longrunning.Operation.metadata] field type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The [response][google.longrunning.Operation.response] field type is [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the copying and delete the backup. Concurrent CopyBackup requests can run on the same source backup. + *

    Starts copying a Cloud Spanner Backup. The returned backup [long-running operation][google.longrunning.Operation] will have a name of the format `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` and can be used to track copying of the backup. The operation is associated with the destination backup. The [metadata][google.longrunning.Operation.metadata] field type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The [response][google.longrunning.Operation.response] field type is [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the copying and delete the destination backup. Concurrent CopyBackup requests can run on the same source backup. * *

    Request object method variants only take one parameter, a request object, which must be constructed before the call.

    *
      @@ -515,6 +523,101 @@ *
    * * + * + *

    CreateBackupSchedule + *

    Creates a new backup schedule. + * + *

    Request object method variants only take one parameter, a request object, which must be constructed before the call.

    + *
      + *
    • createBackupSchedule(CreateBackupScheduleRequest request) + *

    + *

    "Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

    + *
      + *
    • createBackupSchedule(DatabaseName parent, BackupSchedule backupSchedule, String backupScheduleId) + *

    • createBackupSchedule(String parent, BackupSchedule backupSchedule, String backupScheduleId) + *

    + *

    Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

    + *
      + *
    • createBackupScheduleCallable() + *

    + * + * + * + *

    GetBackupSchedule + *

    Gets backup schedule for the input schedule name. + * + *

    Request object method variants only take one parameter, a request object, which must be constructed before the call.

    + *
      + *
    • getBackupSchedule(GetBackupScheduleRequest request) + *

    + *

    "Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

    + *
      + *
    • getBackupSchedule(BackupScheduleName name) + *

    • getBackupSchedule(String name) + *

    + *

    Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

    + *
      + *
    • getBackupScheduleCallable() + *

    + * + * + * + *

    UpdateBackupSchedule + *

    Updates a backup schedule. + * + *

    Request object method variants only take one parameter, a request object, which must be constructed before the call.

    + *
      + *
    • updateBackupSchedule(UpdateBackupScheduleRequest request) + *

    + *

    "Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

    + *
      + *
    • updateBackupSchedule(BackupSchedule backupSchedule, FieldMask updateMask) + *

    + *

    Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

    + *
      + *
    • updateBackupScheduleCallable() + *

    + * + * + * + *

    DeleteBackupSchedule + *

    Deletes a backup schedule. + * + *

    Request object method variants only take one parameter, a request object, which must be constructed before the call.

    + *
      + *
    • deleteBackupSchedule(DeleteBackupScheduleRequest request) + *

    + *

    "Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

    + *
      + *
    • deleteBackupSchedule(BackupScheduleName name) + *

    • deleteBackupSchedule(String name) + *

    + *

    Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

    + *
      + *
    • deleteBackupScheduleCallable() + *

    + * + * + * + *

    ListBackupSchedules + *

    Lists all the backup schedules for the database. + * + *

    Request object method variants only take one parameter, a request object, which must be constructed before the call.

    + *
      + *
    • listBackupSchedules(ListBackupSchedulesRequest request) + *

    + *

    "Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

    + *
      + *
    • listBackupSchedules(DatabaseName parent) + *

    • listBackupSchedules(String parent) + *

    + *

    Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

    + *
      + *
    • listBackupSchedulesPagedCallable() + *

    • listBackupSchedulesCallable() + *

    + * + * * * *

    See the individual methods for example code. @@ -2459,8 +2562,8 @@ public final UnaryCallable createBackupCallable( * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The * [response][google.longrunning.Operation.response] field type is * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned - * operation will stop the copying and delete the backup. Concurrent CopyBackup requests can run - * on the same source backup. + * operation will stop the copying and delete the destination backup. Concurrent CopyBackup + * requests can run on the same source backup. * *

    Sample code: * @@ -2517,8 +2620,8 @@ public final OperationFuture copyBackupAsync( * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The * [response][google.longrunning.Operation.response] field type is * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned - * operation will stop the copying and delete the backup. Concurrent CopyBackup requests can run - * on the same source backup. + * operation will stop the copying and delete the destination backup. Concurrent CopyBackup + * requests can run on the same source backup. * *

    Sample code: * @@ -2575,8 +2678,8 @@ public final OperationFuture copyBackupAsync( * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The * [response][google.longrunning.Operation.response] field type is * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned - * operation will stop the copying and delete the backup. Concurrent CopyBackup requests can run - * on the same source backup. + * operation will stop the copying and delete the destination backup. Concurrent CopyBackup + * requests can run on the same source backup. * *

    Sample code: * @@ -2633,8 +2736,8 @@ public final OperationFuture copyBackupAsync( * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The * [response][google.longrunning.Operation.response] field type is * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned - * operation will stop the copying and delete the backup. Concurrent CopyBackup requests can run - * on the same source backup. + * operation will stop the copying and delete the destination backup. Concurrent CopyBackup + * requests can run on the same source backup. * *

    Sample code: * @@ -2691,8 +2794,8 @@ public final OperationFuture copyBackupAsync( * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The * [response][google.longrunning.Operation.response] field type is * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned - * operation will stop the copying and delete the backup. Concurrent CopyBackup requests can run - * on the same source backup. + * operation will stop the copying and delete the destination backup. Concurrent CopyBackup + * requests can run on the same source backup. * *

    Sample code: * @@ -2733,8 +2836,8 @@ public final OperationFuture copyBackupAsync( * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The * [response][google.longrunning.Operation.response] field type is * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned - * operation will stop the copying and delete the backup. Concurrent CopyBackup requests can run - * on the same source backup. + * operation will stop the copying and delete the destination backup. Concurrent CopyBackup + * requests can run on the same source backup. * *

    Sample code: * @@ -2775,8 +2878,8 @@ public final OperationFuture copyBackupAsync( * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. The * [response][google.longrunning.Operation.response] field type is * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned - * operation will stop the copying and delete the backup. Concurrent CopyBackup requests can run - * on the same source backup. + * operation will stop the copying and delete the destination backup. Concurrent CopyBackup + * requests can run on the same source backup. * *

    Sample code: * @@ -4069,7 +4172,7 @@ public final ListBackupOperationsPagedResponse listBackupOperations( * } * * @param parent Required. The database whose roles should be listed. Values are of the form - * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles`. + * `projects/<project>/instances/<instance>/databases/<database>`. * @throws com.google.api.gax.rpc.ApiException if the remote call fails */ public final ListDatabaseRolesPagedResponse listDatabaseRoles(DatabaseName parent) { @@ -4101,7 +4204,7 @@ public final ListDatabaseRolesPagedResponse listDatabaseRoles(DatabaseName paren * } * * @param parent Required. The database whose roles should be listed. Values are of the form - * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles`. + * `projects/<project>/instances/<instance>/databases/<database>`. * @throws com.google.api.gax.rpc.ApiException if the remote call fails */ public final ListDatabaseRolesPagedResponse listDatabaseRoles(String parent) { @@ -4215,58 +4318,703 @@ public final ListDatabaseRolesPagedResponse listDatabaseRoles(ListDatabaseRolesR return stub.listDatabaseRolesCallable(); } - @Override - public final void close() { - stub.close(); + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new backup schedule. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
    +   *   BackupSchedule backupSchedule = BackupSchedule.newBuilder().build();
    +   *   String backupScheduleId = "backupScheduleId1704974708";
    +   *   BackupSchedule response =
    +   *       databaseAdminClient.createBackupSchedule(parent, backupSchedule, backupScheduleId);
    +   * }
    +   * }
    + * + * @param parent Required. The name of the database that this backup schedule applies to. + * @param backupSchedule Required. The backup schedule to create. + * @param backupScheduleId Required. The Id to use for the backup schedule. The + * `backup_schedule_id` appended to `parent` forms the full backup schedule name of the form + * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule createBackupSchedule( + DatabaseName parent, BackupSchedule backupSchedule, String backupScheduleId) { + CreateBackupScheduleRequest request = + CreateBackupScheduleRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .setBackupSchedule(backupSchedule) + .setBackupScheduleId(backupScheduleId) + .build(); + return createBackupSchedule(request); } - @Override - public void shutdown() { - stub.shutdown(); + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new backup schedule. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   String parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString();
    +   *   BackupSchedule backupSchedule = BackupSchedule.newBuilder().build();
    +   *   String backupScheduleId = "backupScheduleId1704974708";
    +   *   BackupSchedule response =
    +   *       databaseAdminClient.createBackupSchedule(parent, backupSchedule, backupScheduleId);
    +   * }
    +   * }
    + * + * @param parent Required. The name of the database that this backup schedule applies to. + * @param backupSchedule Required. The backup schedule to create. + * @param backupScheduleId Required. The Id to use for the backup schedule. The + * `backup_schedule_id` appended to `parent` forms the full backup schedule name of the form + * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule createBackupSchedule( + String parent, BackupSchedule backupSchedule, String backupScheduleId) { + CreateBackupScheduleRequest request = + CreateBackupScheduleRequest.newBuilder() + .setParent(parent) + .setBackupSchedule(backupSchedule) + .setBackupScheduleId(backupScheduleId) + .build(); + return createBackupSchedule(request); } - @Override - public boolean isShutdown() { - return stub.isShutdown(); + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new backup schedule. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   CreateBackupScheduleRequest request =
    +   *       CreateBackupScheduleRequest.newBuilder()
    +   *           .setParent(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
    +   *           .setBackupScheduleId("backupScheduleId1704974708")
    +   *           .setBackupSchedule(BackupSchedule.newBuilder().build())
    +   *           .build();
    +   *   BackupSchedule response = databaseAdminClient.createBackupSchedule(request);
    +   * }
    +   * }
    + * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule createBackupSchedule(CreateBackupScheduleRequest request) { + return createBackupScheduleCallable().call(request); } - @Override - public boolean isTerminated() { - return stub.isTerminated(); + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Creates a new backup schedule. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   CreateBackupScheduleRequest request =
    +   *       CreateBackupScheduleRequest.newBuilder()
    +   *           .setParent(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
    +   *           .setBackupScheduleId("backupScheduleId1704974708")
    +   *           .setBackupSchedule(BackupSchedule.newBuilder().build())
    +   *           .build();
    +   *   ApiFuture future =
    +   *       databaseAdminClient.createBackupScheduleCallable().futureCall(request);
    +   *   // Do something.
    +   *   BackupSchedule response = future.get();
    +   * }
    +   * }
    + */ + public final UnaryCallable + createBackupScheduleCallable() { + return stub.createBackupScheduleCallable(); } - @Override - public void shutdownNow() { - stub.shutdownNow(); + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets backup schedule for the input schedule name. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   BackupScheduleName name =
    +   *       BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]");
    +   *   BackupSchedule response = databaseAdminClient.getBackupSchedule(name);
    +   * }
    +   * }
    + * + * @param name Required. The name of the schedule to retrieve. Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule getBackupSchedule(BackupScheduleName name) { + GetBackupScheduleRequest request = + GetBackupScheduleRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + return getBackupSchedule(request); } - @Override - public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { - return stub.awaitTermination(duration, unit); + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets backup schedule for the input schedule name. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   String name =
    +   *       BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]").toString();
    +   *   BackupSchedule response = databaseAdminClient.getBackupSchedule(name);
    +   * }
    +   * }
    + * + * @param name Required. The name of the schedule to retrieve. Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule getBackupSchedule(String name) { + GetBackupScheduleRequest request = GetBackupScheduleRequest.newBuilder().setName(name).build(); + return getBackupSchedule(request); } - public static class ListDatabasesPagedResponse - extends AbstractPagedListResponse< - ListDatabasesRequest, - ListDatabasesResponse, - Database, - ListDatabasesPage, - ListDatabasesFixedSizeCollection> { - - public static ApiFuture createAsync( - PageContext context, - ApiFuture futureResponse) { - ApiFuture futurePage = - ListDatabasesPage.createEmptyPage().createPageAsync(context, futureResponse); - return ApiFutures.transform( - futurePage, - input -> new ListDatabasesPagedResponse(input), - MoreExecutors.directExecutor()); - } + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets backup schedule for the input schedule name. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   GetBackupScheduleRequest request =
    +   *       GetBackupScheduleRequest.newBuilder()
    +   *           .setName(
    +   *               BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]")
    +   *                   .toString())
    +   *           .build();
    +   *   BackupSchedule response = databaseAdminClient.getBackupSchedule(request);
    +   * }
    +   * }
    + * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule getBackupSchedule(GetBackupScheduleRequest request) { + return getBackupScheduleCallable().call(request); + } - private ListDatabasesPagedResponse(ListDatabasesPage page) { - super(page, ListDatabasesFixedSizeCollection.createEmptyCollection()); - } + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Gets backup schedule for the input schedule name. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   GetBackupScheduleRequest request =
    +   *       GetBackupScheduleRequest.newBuilder()
    +   *           .setName(
    +   *               BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]")
    +   *                   .toString())
    +   *           .build();
    +   *   ApiFuture future =
    +   *       databaseAdminClient.getBackupScheduleCallable().futureCall(request);
    +   *   // Do something.
    +   *   BackupSchedule response = future.get();
    +   * }
    +   * }
    + */ + public final UnaryCallable getBackupScheduleCallable() { + return stub.getBackupScheduleCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a backup schedule. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   BackupSchedule backupSchedule = BackupSchedule.newBuilder().build();
    +   *   FieldMask updateMask = FieldMask.newBuilder().build();
    +   *   BackupSchedule response =
    +   *       databaseAdminClient.updateBackupSchedule(backupSchedule, updateMask);
    +   * }
    +   * }
    + * + * @param backupSchedule Required. The backup schedule to update. `backup_schedule.name`, and the + * fields to be updated as specified by `update_mask` are required. Other fields are ignored. + * @param updateMask Required. A mask specifying which fields in the BackupSchedule resource + * should be updated. This mask is relative to the BackupSchedule resource, not to the request + * message. The field mask must always be specified; this prevents any future fields from + * being erased accidentally. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule updateBackupSchedule( + BackupSchedule backupSchedule, FieldMask updateMask) { + UpdateBackupScheduleRequest request = + UpdateBackupScheduleRequest.newBuilder() + .setBackupSchedule(backupSchedule) + .setUpdateMask(updateMask) + .build(); + return updateBackupSchedule(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a backup schedule. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   UpdateBackupScheduleRequest request =
    +   *       UpdateBackupScheduleRequest.newBuilder()
    +   *           .setBackupSchedule(BackupSchedule.newBuilder().build())
    +   *           .setUpdateMask(FieldMask.newBuilder().build())
    +   *           .build();
    +   *   BackupSchedule response = databaseAdminClient.updateBackupSchedule(request);
    +   * }
    +   * }
    + * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BackupSchedule updateBackupSchedule(UpdateBackupScheduleRequest request) { + return updateBackupScheduleCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Updates a backup schedule. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   UpdateBackupScheduleRequest request =
    +   *       UpdateBackupScheduleRequest.newBuilder()
    +   *           .setBackupSchedule(BackupSchedule.newBuilder().build())
    +   *           .setUpdateMask(FieldMask.newBuilder().build())
    +   *           .build();
    +   *   ApiFuture future =
    +   *       databaseAdminClient.updateBackupScheduleCallable().futureCall(request);
    +   *   // Do something.
    +   *   BackupSchedule response = future.get();
    +   * }
    +   * }
    + */ + public final UnaryCallable + updateBackupScheduleCallable() { + return stub.updateBackupScheduleCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a backup schedule. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   BackupScheduleName name =
    +   *       BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]");
    +   *   databaseAdminClient.deleteBackupSchedule(name);
    +   * }
    +   * }
    + * + * @param name Required. The name of the schedule to delete. Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteBackupSchedule(BackupScheduleName name) { + DeleteBackupScheduleRequest request = + DeleteBackupScheduleRequest.newBuilder() + .setName(name == null ? null : name.toString()) + .build(); + deleteBackupSchedule(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a backup schedule. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   String name =
    +   *       BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]").toString();
    +   *   databaseAdminClient.deleteBackupSchedule(name);
    +   * }
    +   * }
    + * + * @param name Required. The name of the schedule to delete. Values are of the form + * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteBackupSchedule(String name) { + DeleteBackupScheduleRequest request = + DeleteBackupScheduleRequest.newBuilder().setName(name).build(); + deleteBackupSchedule(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a backup schedule. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   DeleteBackupScheduleRequest request =
    +   *       DeleteBackupScheduleRequest.newBuilder()
    +   *           .setName(
    +   *               BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]")
    +   *                   .toString())
    +   *           .build();
    +   *   databaseAdminClient.deleteBackupSchedule(request);
    +   * }
    +   * }
    + * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final void deleteBackupSchedule(DeleteBackupScheduleRequest request) { + deleteBackupScheduleCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Deletes a backup schedule. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   DeleteBackupScheduleRequest request =
    +   *       DeleteBackupScheduleRequest.newBuilder()
    +   *           .setName(
    +   *               BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]")
    +   *                   .toString())
    +   *           .build();
    +   *   ApiFuture future =
    +   *       databaseAdminClient.deleteBackupScheduleCallable().futureCall(request);
    +   *   // Do something.
    +   *   future.get();
    +   * }
    +   * }
    + */ + public final UnaryCallable deleteBackupScheduleCallable() { + return stub.deleteBackupScheduleCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all the backup schedules for the database. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
    +   *   for (BackupSchedule element : databaseAdminClient.listBackupSchedules(parent).iterateAll()) {
    +   *     // doThingsWith(element);
    +   *   }
    +   * }
    +   * }
    + * + * @param parent Required. Database is the parent resource whose backup schedules should be + * listed. Values are of the form + * projects/<project>/instances/<instance>/databases/<database> + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBackupSchedulesPagedResponse listBackupSchedules(DatabaseName parent) { + ListBackupSchedulesRequest request = + ListBackupSchedulesRequest.newBuilder() + .setParent(parent == null ? null : parent.toString()) + .build(); + return listBackupSchedules(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all the backup schedules for the database. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   String parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString();
    +   *   for (BackupSchedule element : databaseAdminClient.listBackupSchedules(parent).iterateAll()) {
    +   *     // doThingsWith(element);
    +   *   }
    +   * }
    +   * }
    + * + * @param parent Required. Database is the parent resource whose backup schedules should be + * listed. Values are of the form + * projects/<project>/instances/<instance>/databases/<database> + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBackupSchedulesPagedResponse listBackupSchedules(String parent) { + ListBackupSchedulesRequest request = + ListBackupSchedulesRequest.newBuilder().setParent(parent).build(); + return listBackupSchedules(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all the backup schedules for the database. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   ListBackupSchedulesRequest request =
    +   *       ListBackupSchedulesRequest.newBuilder()
    +   *           .setParent(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
    +   *           .setPageSize(883849137)
    +   *           .setPageToken("pageToken873572522")
    +   *           .build();
    +   *   for (BackupSchedule element : databaseAdminClient.listBackupSchedules(request).iterateAll()) {
    +   *     // doThingsWith(element);
    +   *   }
    +   * }
    +   * }
    + * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ListBackupSchedulesPagedResponse listBackupSchedules( + ListBackupSchedulesRequest request) { + return listBackupSchedulesPagedCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all the backup schedules for the database. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   ListBackupSchedulesRequest request =
    +   *       ListBackupSchedulesRequest.newBuilder()
    +   *           .setParent(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
    +   *           .setPageSize(883849137)
    +   *           .setPageToken("pageToken873572522")
    +   *           .build();
    +   *   ApiFuture future =
    +   *       databaseAdminClient.listBackupSchedulesPagedCallable().futureCall(request);
    +   *   // Do something.
    +   *   for (BackupSchedule element : future.get().iterateAll()) {
    +   *     // doThingsWith(element);
    +   *   }
    +   * }
    +   * }
    + */ + public final UnaryCallable + listBackupSchedulesPagedCallable() { + return stub.listBackupSchedulesPagedCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Lists all the backup schedules for the database. + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
    +   *   ListBackupSchedulesRequest request =
    +   *       ListBackupSchedulesRequest.newBuilder()
    +   *           .setParent(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
    +   *           .setPageSize(883849137)
    +   *           .setPageToken("pageToken873572522")
    +   *           .build();
    +   *   while (true) {
    +   *     ListBackupSchedulesResponse response =
    +   *         databaseAdminClient.listBackupSchedulesCallable().call(request);
    +   *     for (BackupSchedule element : response.getBackupSchedulesList()) {
    +   *       // doThingsWith(element);
    +   *     }
    +   *     String nextPageToken = response.getNextPageToken();
    +   *     if (!Strings.isNullOrEmpty(nextPageToken)) {
    +   *       request = request.toBuilder().setPageToken(nextPageToken).build();
    +   *     } else {
    +   *       break;
    +   *     }
    +   *   }
    +   * }
    +   * }
    + */ + public final UnaryCallable + listBackupSchedulesCallable() { + return stub.listBackupSchedulesCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } + + public static class ListDatabasesPagedResponse + extends AbstractPagedListResponse< + ListDatabasesRequest, + ListDatabasesResponse, + Database, + ListDatabasesPage, + ListDatabasesFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListDatabasesPage.createEmptyPage().createPageAsync(context, futureResponse); + return ApiFutures.transform( + futurePage, + input -> new ListDatabasesPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListDatabasesPagedResponse(ListDatabasesPage page) { + super(page, ListDatabasesFixedSizeCollection.createEmptyCollection()); + } } public static class ListDatabasesPage @@ -4637,4 +5385,88 @@ protected ListDatabaseRolesFixedSizeCollection createCollection( return new ListDatabaseRolesFixedSizeCollection(pages, collectionSize); } } + + public static class ListBackupSchedulesPagedResponse + extends AbstractPagedListResponse< + ListBackupSchedulesRequest, + ListBackupSchedulesResponse, + BackupSchedule, + ListBackupSchedulesPage, + ListBackupSchedulesFixedSizeCollection> { + + public static ApiFuture createAsync( + PageContext + context, + ApiFuture futureResponse) { + ApiFuture futurePage = + ListBackupSchedulesPage.createEmptyPage().createPageAsync(context, futureResponse); + return 
ApiFutures.transform( + futurePage, + input -> new ListBackupSchedulesPagedResponse(input), + MoreExecutors.directExecutor()); + } + + private ListBackupSchedulesPagedResponse(ListBackupSchedulesPage page) { + super(page, ListBackupSchedulesFixedSizeCollection.createEmptyCollection()); + } + } + + public static class ListBackupSchedulesPage + extends AbstractPage< + ListBackupSchedulesRequest, + ListBackupSchedulesResponse, + BackupSchedule, + ListBackupSchedulesPage> { + + private ListBackupSchedulesPage( + PageContext + context, + ListBackupSchedulesResponse response) { + super(context, response); + } + + private static ListBackupSchedulesPage createEmptyPage() { + return new ListBackupSchedulesPage(null, null); + } + + @Override + protected ListBackupSchedulesPage createPage( + PageContext + context, + ListBackupSchedulesResponse response) { + return new ListBackupSchedulesPage(context, response); + } + + @Override + public ApiFuture createPageAsync( + PageContext + context, + ApiFuture futureResponse) { + return super.createPageAsync(context, futureResponse); + } + } + + public static class ListBackupSchedulesFixedSizeCollection + extends AbstractFixedSizeCollection< + ListBackupSchedulesRequest, + ListBackupSchedulesResponse, + BackupSchedule, + ListBackupSchedulesPage, + ListBackupSchedulesFixedSizeCollection> { + + private ListBackupSchedulesFixedSizeCollection( + List pages, int collectionSize) { + super(pages, collectionSize); + } + + private static ListBackupSchedulesFixedSizeCollection createEmptyCollection() { + return new ListBackupSchedulesFixedSizeCollection(null, 0); + } + + @Override + protected ListBackupSchedulesFixedSizeCollection createCollection( + List pages, int collectionSize) { + return new ListBackupSchedulesFixedSizeCollection(pages, collectionSize); + } + } } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminSettings.java 
b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminSettings.java index 50d6900e1ce..cde80bf816e 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminSettings.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminSettings.java @@ -17,6 +17,7 @@ package com.google.cloud.spanner.admin.database.v1; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupSchedulesPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPagedResponse; @@ -44,21 +45,27 @@ import com.google.longrunning.Operation; import com.google.protobuf.Empty; import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupSchedule; import com.google.spanner.admin.database.v1.CopyBackupMetadata; import com.google.spanner.admin.database.v1.CopyBackupRequest; import com.google.spanner.admin.database.v1.CreateBackupMetadata; import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; import com.google.spanner.admin.database.v1.CreateDatabaseRequest; import com.google.spanner.admin.database.v1.Database; import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; import com.google.spanner.admin.database.v1.DropDatabaseRequest; import com.google.spanner.admin.database.v1.GetBackupRequest; +import 
com.google.spanner.admin.database.v1.GetBackupScheduleRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.GetDatabaseRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; import com.google.spanner.admin.database.v1.ListBackupsRequest; import com.google.spanner.admin.database.v1.ListBackupsResponse; import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; @@ -70,6 +77,7 @@ import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; @@ -93,7 +101,9 @@ *

    The builder of this class is recursive, so contained classes are themselves builders. When * build() is called, the tree of builders is called to create the complete settings object. * - *

    For example, to set the total timeout of getDatabase to 30 seconds: + *

    For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of getDatabase: * *

    {@code
      * // This snippet has been automatically generated and should be regarded as a code template only.
    @@ -109,10 +119,46 @@
      *             .getDatabaseSettings()
      *             .getRetrySettings()
      *             .toBuilder()
    - *             .setTotalTimeout(Duration.ofSeconds(30))
    + *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
    + *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
    + *             .setMaxAttempts(5)
    + *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
    + *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
    + *             .setRetryDelayMultiplier(1.3)
    + *             .setRpcTimeoutMultiplier(1.5)
    + *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
      *             .build());
      * DatabaseAdminSettings databaseAdminSettings = databaseAdminSettingsBuilder.build();
      * }
    + * + * Please refer to the [Client Side Retry + * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for + * additional support in setting retries. + * + *

    To configure the RetrySettings of a Long Running Operation method, create an + * OperationTimedPollAlgorithm object and update the RPC's polling algorithm. For example, to + * configure the RetrySettings for createDatabase: + * + *

    {@code
    + * // This snippet has been automatically generated and should be regarded as a code template only.
    + * // It will require modifications to work:
    + * // - It may require correct/in-range values for request initialization.
    + * // - It may require specifying regional endpoints when creating the service client as shown in
    + * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    + * DatabaseAdminSettings.Builder databaseAdminSettingsBuilder = DatabaseAdminSettings.newBuilder();
    + * TimedRetryAlgorithm timedRetryAlgorithm =
    + *     OperationalTimedPollAlgorithm.create(
    + *         RetrySettings.newBuilder()
    + *             .setInitialRetryDelayDuration(Duration.ofMillis(500))
    + *             .setRetryDelayMultiplier(1.5)
    + *             .setMaxRetryDelay(Duration.ofMillis(5000))
    + *             .setTotalTimeoutDuration(Duration.ofHours(24))
    + *             .build());
    + * databaseAdminSettingsBuilder
    + *     .createClusterOperationSettings()
    + *     .setPollingAlgorithm(timedRetryAlgorithm)
    + *     .build();
    + * }
    */ @Generated("by gapic-generator-java") public class DatabaseAdminSettings extends ClientSettings { @@ -266,6 +312,35 @@ public UnaryCallSettings restoreDatabaseSetti return ((DatabaseAdminStubSettings) getStubSettings()).listDatabaseRolesSettings(); } + /** Returns the object with the settings used for calls to createBackupSchedule. */ + public UnaryCallSettings + createBackupScheduleSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).createBackupScheduleSettings(); + } + + /** Returns the object with the settings used for calls to getBackupSchedule. */ + public UnaryCallSettings getBackupScheduleSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).getBackupScheduleSettings(); + } + + /** Returns the object with the settings used for calls to updateBackupSchedule. */ + public UnaryCallSettings + updateBackupScheduleSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).updateBackupScheduleSettings(); + } + + /** Returns the object with the settings used for calls to deleteBackupSchedule. */ + public UnaryCallSettings deleteBackupScheduleSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).deleteBackupScheduleSettings(); + } + + /** Returns the object with the settings used for calls to listBackupSchedules. */ + public PagedCallSettings< + ListBackupSchedulesRequest, ListBackupSchedulesResponse, ListBackupSchedulesPagedResponse> + listBackupSchedulesSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).listBackupSchedulesSettings(); + } + public static final DatabaseAdminSettings create(DatabaseAdminStubSettings stub) throws IOException { return new DatabaseAdminSettings.Builder(stub.toBuilder()).build(); @@ -531,6 +606,39 @@ public UnaryCallSettings.Builder restoreDatab return getStubSettingsBuilder().listDatabaseRolesSettings(); } + /** Returns the builder for the settings used for calls to createBackupSchedule. 
*/ + public UnaryCallSettings.Builder + createBackupScheduleSettings() { + return getStubSettingsBuilder().createBackupScheduleSettings(); + } + + /** Returns the builder for the settings used for calls to getBackupSchedule. */ + public UnaryCallSettings.Builder + getBackupScheduleSettings() { + return getStubSettingsBuilder().getBackupScheduleSettings(); + } + + /** Returns the builder for the settings used for calls to updateBackupSchedule. */ + public UnaryCallSettings.Builder + updateBackupScheduleSettings() { + return getStubSettingsBuilder().updateBackupScheduleSettings(); + } + + /** Returns the builder for the settings used for calls to deleteBackupSchedule. */ + public UnaryCallSettings.Builder + deleteBackupScheduleSettings() { + return getStubSettingsBuilder().deleteBackupScheduleSettings(); + } + + /** Returns the builder for the settings used for calls to listBackupSchedules. */ + public PagedCallSettings.Builder< + ListBackupSchedulesRequest, + ListBackupSchedulesResponse, + ListBackupSchedulesPagedResponse> + listBackupSchedulesSettings() { + return getStubSettingsBuilder().listBackupSchedulesSettings(); + } + @Override public DatabaseAdminSettings build() throws IOException { return new DatabaseAdminSettings(this); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/gapic_metadata.json b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/gapic_metadata.json index 01fcbd4de1a..7d6c894d7b6 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/gapic_metadata.json +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/gapic_metadata.json @@ -16,18 +16,27 @@ "CreateBackup": { "methods": ["createBackupAsync", "createBackupAsync", "createBackupAsync", "createBackupOperationCallable", "createBackupCallable"] }, + "CreateBackupSchedule": { + "methods": ["createBackupSchedule", "createBackupSchedule", "createBackupSchedule", 
"createBackupScheduleCallable"] + }, "CreateDatabase": { "methods": ["createDatabaseAsync", "createDatabaseAsync", "createDatabaseAsync", "createDatabaseOperationCallable", "createDatabaseCallable"] }, "DeleteBackup": { "methods": ["deleteBackup", "deleteBackup", "deleteBackup", "deleteBackupCallable"] }, + "DeleteBackupSchedule": { + "methods": ["deleteBackupSchedule", "deleteBackupSchedule", "deleteBackupSchedule", "deleteBackupScheduleCallable"] + }, "DropDatabase": { "methods": ["dropDatabase", "dropDatabase", "dropDatabase", "dropDatabaseCallable"] }, "GetBackup": { "methods": ["getBackup", "getBackup", "getBackup", "getBackupCallable"] }, + "GetBackupSchedule": { + "methods": ["getBackupSchedule", "getBackupSchedule", "getBackupSchedule", "getBackupScheduleCallable"] + }, "GetDatabase": { "methods": ["getDatabase", "getDatabase", "getDatabase", "getDatabaseCallable"] }, @@ -40,6 +49,9 @@ "ListBackupOperations": { "methods": ["listBackupOperations", "listBackupOperations", "listBackupOperations", "listBackupOperationsPagedCallable", "listBackupOperationsCallable"] }, + "ListBackupSchedules": { + "methods": ["listBackupSchedules", "listBackupSchedules", "listBackupSchedules", "listBackupSchedulesPagedCallable", "listBackupSchedulesCallable"] + }, "ListBackups": { "methods": ["listBackups", "listBackups", "listBackups", "listBackupsPagedCallable", "listBackupsCallable"] }, @@ -64,6 +76,9 @@ "UpdateBackup": { "methods": ["updateBackup", "updateBackup", "updateBackupCallable"] }, + "UpdateBackupSchedule": { + "methods": ["updateBackupSchedule", "updateBackupSchedule", "updateBackupScheduleCallable"] + }, "UpdateDatabase": { "methods": ["updateDatabaseAsync", "updateDatabaseAsync", "updateDatabaseOperationCallable", "updateDatabaseCallable"] }, diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/package-info.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/package-info.java index 
403828ab3ef..1fd79833e09 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/package-info.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/package-info.java @@ -24,8 +24,8 @@ *

    Service Description: Cloud Spanner Database Admin API * *

    The Cloud Spanner Database Admin API can be used to: * create, drop, and list databases - * * update the schema of pre-existing databases * create, delete and list backups for a - * database * restore a database from an existing backup + * * update the schema of pre-existing databases * create, delete, copy and list backups for + * a database * restore a database from an existing backup * *

    Sample for DatabaseAdminClient: * diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStub.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStub.java index 37fb433c3eb..2f53f6cf5b4 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStub.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStub.java @@ -17,6 +17,7 @@ package com.google.cloud.spanner.admin.database.v1.stub; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupSchedulesPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPagedResponse; @@ -34,21 +35,27 @@ import com.google.longrunning.stub.OperationsStub; import com.google.protobuf.Empty; import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupSchedule; import com.google.spanner.admin.database.v1.CopyBackupMetadata; import com.google.spanner.admin.database.v1.CopyBackupRequest; import com.google.spanner.admin.database.v1.CreateBackupMetadata; import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; import com.google.spanner.admin.database.v1.CreateDatabaseRequest; import com.google.spanner.admin.database.v1.Database; import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import 
com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; import com.google.spanner.admin.database.v1.DropDatabaseRequest; import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.GetDatabaseRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; import com.google.spanner.admin.database.v1.ListBackupsRequest; import com.google.spanner.admin.database.v1.ListBackupsResponse; import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; @@ -60,6 +67,7 @@ import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; @@ -223,6 +231,32 @@ public UnaryCallable restoreDatabaseCallable( throw new UnsupportedOperationException("Not implemented: listDatabaseRolesCallable()"); } + public UnaryCallable createBackupScheduleCallable() { + throw new UnsupportedOperationException("Not implemented: createBackupScheduleCallable()"); + } + + public UnaryCallable getBackupScheduleCallable() { + throw new UnsupportedOperationException("Not implemented: getBackupScheduleCallable()"); + } + + public UnaryCallable updateBackupScheduleCallable() { + throw new 
UnsupportedOperationException("Not implemented: updateBackupScheduleCallable()"); + } + + public UnaryCallable deleteBackupScheduleCallable() { + throw new UnsupportedOperationException("Not implemented: deleteBackupScheduleCallable()"); + } + + public UnaryCallable + listBackupSchedulesPagedCallable() { + throw new UnsupportedOperationException("Not implemented: listBackupSchedulesPagedCallable()"); + } + + public UnaryCallable + listBackupSchedulesCallable() { + throw new UnsupportedOperationException("Not implemented: listBackupSchedulesCallable()"); + } + @Override public abstract void close(); } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStubSettings.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStubSettings.java index 4808f1553e9..5f292588139 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStubSettings.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStubSettings.java @@ -17,6 +17,7 @@ package com.google.cloud.spanner.admin.database.v1.stub; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupSchedulesPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPagedResponse; @@ -25,6 +26,7 @@ import com.google.api.core.ApiFunction; import com.google.api.core.ApiFuture; import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; import com.google.api.gax.core.GaxProperties; import 
com.google.api.gax.core.GoogleCredentialsProvider; import com.google.api.gax.core.InstantiatingExecutorProvider; @@ -63,22 +65,28 @@ import com.google.longrunning.Operation; import com.google.protobuf.Empty; import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupSchedule; import com.google.spanner.admin.database.v1.CopyBackupMetadata; import com.google.spanner.admin.database.v1.CopyBackupRequest; import com.google.spanner.admin.database.v1.CreateBackupMetadata; import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; import com.google.spanner.admin.database.v1.CreateDatabaseRequest; import com.google.spanner.admin.database.v1.Database; import com.google.spanner.admin.database.v1.DatabaseRole; import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; import com.google.spanner.admin.database.v1.DropDatabaseRequest; import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.GetDatabaseRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; import com.google.spanner.admin.database.v1.ListBackupsRequest; import com.google.spanner.admin.database.v1.ListBackupsResponse; import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; @@ -90,6 +98,7 @@ import 
com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; @@ -114,7 +123,9 @@ *

    The builder of this class is recursive, so contained classes are themselves builders. When * build() is called, the tree of builders is called to create the complete settings object. * - *

    For example, to set the total timeout of getDatabase to 30 seconds: + *

    For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of getDatabase: * *

    {@code
      * // This snippet has been automatically generated and should be regarded as a code template only.
    @@ -131,10 +142,47 @@
      *             .getDatabaseSettings()
      *             .getRetrySettings()
      *             .toBuilder()
    - *             .setTotalTimeout(Duration.ofSeconds(30))
    + *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
    + *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
    + *             .setMaxAttempts(5)
    + *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
    + *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
    + *             .setRetryDelayMultiplier(1.3)
    + *             .setRpcTimeoutMultiplier(1.5)
    + *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
      *             .build());
      * DatabaseAdminStubSettings databaseAdminSettings = databaseAdminSettingsBuilder.build();
      * }
    + * + * Please refer to the [Client Side Retry + * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for + * additional support in setting retries. + * + *

    To configure the RetrySettings of a Long Running Operation method, create an + * OperationTimedPollAlgorithm object and update the RPC's polling algorithm. For example, to + * configure the RetrySettings for createDatabase: + * + *

    {@code
    + * // This snippet has been automatically generated and should be regarded as a code template only.
    + * // It will require modifications to work:
    + * // - It may require correct/in-range values for request initialization.
    + * // - It may require specifying regional endpoints when creating the service client as shown in
    + * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    + * DatabaseAdminStubSettings.Builder databaseAdminSettingsBuilder =
    + *     DatabaseAdminStubSettings.newBuilder();
    + * TimedRetryAlgorithm timedRetryAlgorithm =
    + *     OperationalTimedPollAlgorithm.create(
    + *         RetrySettings.newBuilder()
    + *             .setInitialRetryDelayDuration(Duration.ofMillis(500))
    + *             .setRetryDelayMultiplier(1.5)
    + *             .setMaxRetryDelay(Duration.ofMillis(5000))
    + *             .setTotalTimeoutDuration(Duration.ofHours(24))
    + *             .build());
    + * databaseAdminSettingsBuilder
    + *     .createClusterOperationSettings()
    + *     .setPollingAlgorithm(timedRetryAlgorithm)
    + *     .build();
    + * }
    */ @Generated("by gapic-generator-java") public class DatabaseAdminStubSettings extends StubSettings { @@ -192,6 +240,16 @@ public class DatabaseAdminStubSettings extends StubSettings listDatabaseRolesSettings; + private final UnaryCallSettings + createBackupScheduleSettings; + private final UnaryCallSettings + getBackupScheduleSettings; + private final UnaryCallSettings + updateBackupScheduleSettings; + private final UnaryCallSettings deleteBackupScheduleSettings; + private final PagedCallSettings< + ListBackupSchedulesRequest, ListBackupSchedulesResponse, ListBackupSchedulesPagedResponse> + listBackupSchedulesSettings; private static final PagedListDescriptor LIST_DATABASES_PAGE_STR_DESC = @@ -223,9 +281,7 @@ public String extractNextToken(ListDatabasesResponse payload) { @Override public Iterable extractResources(ListDatabasesResponse payload) { - return payload.getDatabasesList() == null - ? ImmutableList.of() - : payload.getDatabasesList(); + return payload.getDatabasesList(); } }; @@ -259,9 +315,7 @@ public String extractNextToken(ListBackupsResponse payload) { @Override public Iterable extractResources(ListBackupsResponse payload) { - return payload.getBackupsList() == null - ? ImmutableList.of() - : payload.getBackupsList(); + return payload.getBackupsList(); } }; @@ -301,9 +355,7 @@ public String extractNextToken(ListDatabaseOperationsResponse payload) { @Override public Iterable extractResources(ListDatabaseOperationsResponse payload) { - return payload.getOperationsList() == null - ? ImmutableList.of() - : payload.getOperationsList(); + return payload.getOperationsList(); } }; @@ -341,9 +393,7 @@ public String extractNextToken(ListBackupOperationsResponse payload) { @Override public Iterable extractResources(ListBackupOperationsResponse payload) { - return payload.getOperationsList() == null - ? 
ImmutableList.of() - : payload.getOperationsList(); + return payload.getOperationsList(); } }; @@ -381,9 +431,45 @@ public String extractNextToken(ListDatabaseRolesResponse payload) { @Override public Iterable extractResources(ListDatabaseRolesResponse payload) { - return payload.getDatabaseRolesList() == null - ? ImmutableList.of() - : payload.getDatabaseRolesList(); + return payload.getDatabaseRolesList(); + } + }; + + private static final PagedListDescriptor< + ListBackupSchedulesRequest, ListBackupSchedulesResponse, BackupSchedule> + LIST_BACKUP_SCHEDULES_PAGE_STR_DESC = + new PagedListDescriptor< + ListBackupSchedulesRequest, ListBackupSchedulesResponse, BackupSchedule>() { + @Override + public String emptyToken() { + return ""; + } + + @Override + public ListBackupSchedulesRequest injectToken( + ListBackupSchedulesRequest payload, String token) { + return ListBackupSchedulesRequest.newBuilder(payload).setPageToken(token).build(); + } + + @Override + public ListBackupSchedulesRequest injectPageSize( + ListBackupSchedulesRequest payload, int pageSize) { + return ListBackupSchedulesRequest.newBuilder(payload).setPageSize(pageSize).build(); + } + + @Override + public Integer extractPageSize(ListBackupSchedulesRequest payload) { + return payload.getPageSize(); + } + + @Override + public String extractNextToken(ListBackupSchedulesResponse payload) { + return payload.getNextPageToken(); + } + + @Override + public Iterable extractResources(ListBackupSchedulesResponse payload) { + return payload.getBackupSchedulesList(); } }; @@ -489,6 +575,27 @@ public ApiFuture getFuturePagedResponse( } }; + private static final PagedListResponseFactory< + ListBackupSchedulesRequest, ListBackupSchedulesResponse, ListBackupSchedulesPagedResponse> + LIST_BACKUP_SCHEDULES_PAGE_STR_FACT = + new PagedListResponseFactory< + ListBackupSchedulesRequest, + ListBackupSchedulesResponse, + ListBackupSchedulesPagedResponse>() { + @Override + public ApiFuture getFuturePagedResponse( + 
UnaryCallable callable, + ListBackupSchedulesRequest request, + ApiCallContext context, + ApiFuture futureResponse) { + PageContext + pageContext = + PageContext.create( + callable, LIST_BACKUP_SCHEDULES_PAGE_STR_DESC, request, context); + return ListBackupSchedulesPagedResponse.createAsync(pageContext, futureResponse); + } + }; + /** Returns the object with the settings used for calls to listDatabases. */ public PagedCallSettings listDatabasesSettings() { @@ -638,6 +745,35 @@ public UnaryCallSettings restoreDatabaseSetti return listDatabaseRolesSettings; } + /** Returns the object with the settings used for calls to createBackupSchedule. */ + public UnaryCallSettings + createBackupScheduleSettings() { + return createBackupScheduleSettings; + } + + /** Returns the object with the settings used for calls to getBackupSchedule. */ + public UnaryCallSettings getBackupScheduleSettings() { + return getBackupScheduleSettings; + } + + /** Returns the object with the settings used for calls to updateBackupSchedule. */ + public UnaryCallSettings + updateBackupScheduleSettings() { + return updateBackupScheduleSettings; + } + + /** Returns the object with the settings used for calls to deleteBackupSchedule. */ + public UnaryCallSettings deleteBackupScheduleSettings() { + return deleteBackupScheduleSettings; + } + + /** Returns the object with the settings used for calls to listBackupSchedules. */ + public PagedCallSettings< + ListBackupSchedulesRequest, ListBackupSchedulesResponse, ListBackupSchedulesPagedResponse> + listBackupSchedulesSettings() { + return listBackupSchedulesSettings; + } + public DatabaseAdminStub createStub() throws IOException { if (getTransportChannelProvider() .getTransportName() @@ -666,6 +802,7 @@ public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuild } /** Returns the default service endpoint. 
*/ + @ObsoleteApi("Use getEndpoint() instead") public static String getDefaultEndpoint() { return "spanner.googleapis.com:443"; } @@ -775,6 +912,11 @@ protected DatabaseAdminStubSettings(Builder settingsBuilder) throws IOException listDatabaseOperationsSettings = settingsBuilder.listDatabaseOperationsSettings().build(); listBackupOperationsSettings = settingsBuilder.listBackupOperationsSettings().build(); listDatabaseRolesSettings = settingsBuilder.listDatabaseRolesSettings().build(); + createBackupScheduleSettings = settingsBuilder.createBackupScheduleSettings().build(); + getBackupScheduleSettings = settingsBuilder.getBackupScheduleSettings().build(); + updateBackupScheduleSettings = settingsBuilder.updateBackupScheduleSettings().build(); + deleteBackupScheduleSettings = settingsBuilder.deleteBackupScheduleSettings().build(); + listBackupSchedulesSettings = settingsBuilder.listBackupSchedulesSettings().build(); } /** Builder for DatabaseAdminStubSettings. */ @@ -836,6 +978,19 @@ public static class Builder extends StubSettings.Builder listDatabaseRolesSettings; + private final UnaryCallSettings.Builder + createBackupScheduleSettings; + private final UnaryCallSettings.Builder + getBackupScheduleSettings; + private final UnaryCallSettings.Builder + updateBackupScheduleSettings; + private final UnaryCallSettings.Builder + deleteBackupScheduleSettings; + private final PagedCallSettings.Builder< + ListBackupSchedulesRequest, + ListBackupSchedulesResponse, + ListBackupSchedulesPagedResponse> + listBackupSchedulesSettings; private static final ImmutableMap> RETRYABLE_CODE_DEFINITIONS; @@ -940,6 +1095,12 @@ protected Builder(ClientContext clientContext) { listBackupOperationsSettings = PagedCallSettings.newBuilder(LIST_BACKUP_OPERATIONS_PAGE_STR_FACT); listDatabaseRolesSettings = PagedCallSettings.newBuilder(LIST_DATABASE_ROLES_PAGE_STR_FACT); + createBackupScheduleSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + getBackupScheduleSettings = 
UnaryCallSettings.newUnaryCallSettingsBuilder(); + updateBackupScheduleSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + deleteBackupScheduleSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + listBackupSchedulesSettings = + PagedCallSettings.newBuilder(LIST_BACKUP_SCHEDULES_PAGE_STR_FACT); unaryMethodSettingsBuilders = ImmutableList.>of( @@ -962,7 +1123,12 @@ protected Builder(ClientContext clientContext) { restoreDatabaseSettings, listDatabaseOperationsSettings, listBackupOperationsSettings, - listDatabaseRolesSettings); + listDatabaseRolesSettings, + createBackupScheduleSettings, + getBackupScheduleSettings, + updateBackupScheduleSettings, + deleteBackupScheduleSettings, + listBackupSchedulesSettings); initDefaults(this); } @@ -995,6 +1161,11 @@ protected Builder(DatabaseAdminStubSettings settings) { listDatabaseOperationsSettings = settings.listDatabaseOperationsSettings.toBuilder(); listBackupOperationsSettings = settings.listBackupOperationsSettings.toBuilder(); listDatabaseRolesSettings = settings.listDatabaseRolesSettings.toBuilder(); + createBackupScheduleSettings = settings.createBackupScheduleSettings.toBuilder(); + getBackupScheduleSettings = settings.getBackupScheduleSettings.toBuilder(); + updateBackupScheduleSettings = settings.updateBackupScheduleSettings.toBuilder(); + deleteBackupScheduleSettings = settings.deleteBackupScheduleSettings.toBuilder(); + listBackupSchedulesSettings = settings.listBackupSchedulesSettings.toBuilder(); unaryMethodSettingsBuilders = ImmutableList.>of( @@ -1017,7 +1188,12 @@ protected Builder(DatabaseAdminStubSettings settings) { restoreDatabaseSettings, listDatabaseOperationsSettings, listBackupOperationsSettings, - listDatabaseRolesSettings); + listDatabaseRolesSettings, + createBackupScheduleSettings, + getBackupScheduleSettings, + updateBackupScheduleSettings, + deleteBackupScheduleSettings, + listBackupSchedulesSettings); } private static Builder createDefault() { @@ -1145,6 +1321,31 @@ private 
static Builder initDefaults(Builder builder) { .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + builder + .createBackupScheduleSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .getBackupScheduleSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .updateBackupScheduleSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .deleteBackupScheduleSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + + builder + .listBackupSchedulesSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + builder .createDatabaseOperationSettings() .setInitialCallSettings( @@ -1460,6 +1661,39 @@ public UnaryCallSettings.Builder restoreDatab return listDatabaseRolesSettings; } + /** Returns the builder for the settings used for calls to createBackupSchedule. */ + public UnaryCallSettings.Builder + createBackupScheduleSettings() { + return createBackupScheduleSettings; + } + + /** Returns the builder for the settings used for calls to getBackupSchedule. */ + public UnaryCallSettings.Builder + getBackupScheduleSettings() { + return getBackupScheduleSettings; + } + + /** Returns the builder for the settings used for calls to updateBackupSchedule. 
*/ + public UnaryCallSettings.Builder + updateBackupScheduleSettings() { + return updateBackupScheduleSettings; + } + + /** Returns the builder for the settings used for calls to deleteBackupSchedule. */ + public UnaryCallSettings.Builder + deleteBackupScheduleSettings() { + return deleteBackupScheduleSettings; + } + + /** Returns the builder for the settings used for calls to listBackupSchedules. */ + public PagedCallSettings.Builder< + ListBackupSchedulesRequest, + ListBackupSchedulesResponse, + ListBackupSchedulesPagedResponse> + listBackupSchedulesSettings() { + return listBackupSchedulesSettings; + } + @Override public DatabaseAdminStubSettings build() throws IOException { return new DatabaseAdminStubSettings(this); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/GrpcDatabaseAdminStub.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/GrpcDatabaseAdminStub.java index 875ff8443fc..8207ebcbce5 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/GrpcDatabaseAdminStub.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/GrpcDatabaseAdminStub.java @@ -17,6 +17,7 @@ package com.google.cloud.spanner.admin.database.v1.stub; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupSchedulesPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPagedResponse; @@ -39,21 +40,27 @@ import com.google.longrunning.stub.GrpcOperationsStub; import com.google.protobuf.Empty; import 
com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupSchedule; import com.google.spanner.admin.database.v1.CopyBackupMetadata; import com.google.spanner.admin.database.v1.CopyBackupRequest; import com.google.spanner.admin.database.v1.CreateBackupMetadata; import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; import com.google.spanner.admin.database.v1.CreateDatabaseRequest; import com.google.spanner.admin.database.v1.Database; import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; import com.google.spanner.admin.database.v1.DropDatabaseRequest; import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.GetDatabaseRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; import com.google.spanner.admin.database.v1.ListBackupsRequest; import com.google.spanner.admin.database.v1.ListBackupsResponse; import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; @@ -65,6 +72,7 @@ import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; import 
com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; @@ -277,6 +285,61 @@ public class GrpcDatabaseAdminStub extends DatabaseAdminStub { ProtoUtils.marshaller(ListDatabaseRolesResponse.getDefaultInstance())) .build(); + private static final MethodDescriptor + createBackupScheduleMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateBackupScheduleRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(BackupSchedule.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + getBackupScheduleMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule") + .setRequestMarshaller( + ProtoUtils.marshaller(GetBackupScheduleRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(BackupSchedule.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + updateBackupScheduleMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule") + .setRequestMarshaller( + ProtoUtils.marshaller(UpdateBackupScheduleRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(BackupSchedule.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + deleteBackupScheduleMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule") + .setRequestMarshaller( + 
ProtoUtils.marshaller(DeleteBackupScheduleRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .build(); + + private static final MethodDescriptor + listBackupSchedulesMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules") + .setRequestMarshaller( + ProtoUtils.marshaller(ListBackupSchedulesRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(ListBackupSchedulesResponse.getDefaultInstance())) + .build(); + private final UnaryCallable listDatabasesCallable; private final UnaryCallable listDatabasesPagedCallable; @@ -323,6 +386,16 @@ public class GrpcDatabaseAdminStub extends DatabaseAdminStub { listDatabaseRolesCallable; private final UnaryCallable listDatabaseRolesPagedCallable; + private final UnaryCallable + createBackupScheduleCallable; + private final UnaryCallable getBackupScheduleCallable; + private final UnaryCallable + updateBackupScheduleCallable; + private final UnaryCallable deleteBackupScheduleCallable; + private final UnaryCallable + listBackupSchedulesCallable; + private final UnaryCallable + listBackupSchedulesPagedCallable; private final BackgroundResource backgroundResources; private final GrpcOperationsStub operationsStub; @@ -572,6 +645,61 @@ protected GrpcDatabaseAdminStub( return builder.build(); }) .build(); + GrpcCallSettings + createBackupScheduleTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createBackupScheduleMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + GrpcCallSettings getBackupScheduleTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getBackupScheduleMethodDescriptor) + .setParamsExtractor( 
+ request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + updateBackupScheduleTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(updateBackupScheduleMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "backup_schedule.name", + String.valueOf(request.getBackupSchedule().getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings deleteBackupScheduleTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(deleteBackupScheduleMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + GrpcCallSettings + listBackupSchedulesTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(listBackupSchedulesMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); this.listDatabasesCallable = callableFactory.createUnaryCallable( @@ -700,6 +828,36 @@ protected GrpcDatabaseAdminStub( listDatabaseRolesTransportSettings, settings.listDatabaseRolesSettings(), clientContext); + this.createBackupScheduleCallable = + callableFactory.createUnaryCallable( + createBackupScheduleTransportSettings, + settings.createBackupScheduleSettings(), + clientContext); + this.getBackupScheduleCallable = + callableFactory.createUnaryCallable( + getBackupScheduleTransportSettings, + settings.getBackupScheduleSettings(), + clientContext); + this.updateBackupScheduleCallable = + callableFactory.createUnaryCallable( + updateBackupScheduleTransportSettings, + 
settings.updateBackupScheduleSettings(), + clientContext); + this.deleteBackupScheduleCallable = + callableFactory.createUnaryCallable( + deleteBackupScheduleTransportSettings, + settings.deleteBackupScheduleSettings(), + clientContext); + this.listBackupSchedulesCallable = + callableFactory.createUnaryCallable( + listBackupSchedulesTransportSettings, + settings.listBackupSchedulesSettings(), + clientContext); + this.listBackupSchedulesPagedCallable = + callableFactory.createPagedCallable( + listBackupSchedulesTransportSettings, + settings.listBackupSchedulesSettings(), + clientContext); this.backgroundResources = new BackgroundResourceAggregation(clientContext.getBackgroundResources()); @@ -878,6 +1036,38 @@ public UnaryCallable restoreDatabaseCallable( return listDatabaseRolesPagedCallable; } + @Override + public UnaryCallable createBackupScheduleCallable() { + return createBackupScheduleCallable; + } + + @Override + public UnaryCallable getBackupScheduleCallable() { + return getBackupScheduleCallable; + } + + @Override + public UnaryCallable updateBackupScheduleCallable() { + return updateBackupScheduleCallable; + } + + @Override + public UnaryCallable deleteBackupScheduleCallable() { + return deleteBackupScheduleCallable; + } + + @Override + public UnaryCallable + listBackupSchedulesCallable() { + return listBackupSchedulesCallable; + } + + @Override + public UnaryCallable + listBackupSchedulesPagedCallable() { + return listBackupSchedulesPagedCallable; + } + @Override public final void close() { try { diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/HttpJsonDatabaseAdminStub.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/HttpJsonDatabaseAdminStub.java index 1eaa5055133..fbe9f02b1f8 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/HttpJsonDatabaseAdminStub.java +++ 
b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/HttpJsonDatabaseAdminStub.java @@ -17,6 +17,7 @@ package com.google.cloud.spanner.admin.database.v1.stub; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupSchedulesPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPagedResponse; @@ -48,21 +49,27 @@ import com.google.protobuf.Empty; import com.google.protobuf.TypeRegistry; import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupSchedule; import com.google.spanner.admin.database.v1.CopyBackupMetadata; import com.google.spanner.admin.database.v1.CopyBackupRequest; import com.google.spanner.admin.database.v1.CreateBackupMetadata; import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; import com.google.spanner.admin.database.v1.CreateDatabaseRequest; import com.google.spanner.admin.database.v1.Database; import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; import com.google.spanner.admin.database.v1.DropDatabaseRequest; import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import 
com.google.spanner.admin.database.v1.GetDatabaseRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; import com.google.spanner.admin.database.v1.ListBackupsRequest; import com.google.spanner.admin.database.v1.ListBackupsResponse; import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; @@ -74,6 +81,7 @@ import com.google.spanner.admin.database.v1.RestoreDatabaseMetadata; import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; import com.google.spanner.admin.database.v1.UpdateDatabaseMetadata; @@ -385,7 +393,8 @@ public class HttpJsonDatabaseAdminStub extends DatabaseAdminStub { return fields; }) .setAdditionalPaths( - "/v1/{resource=projects/*/instances/*/backups/*}:setIamPolicy") + "/v1/{resource=projects/*/instances/*/backups/*}:setIamPolicy", + "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:setIamPolicy") .setQueryParamsExtractor( request -> { Map> fields = new HashMap<>(); @@ -424,7 +433,8 @@ public class HttpJsonDatabaseAdminStub extends DatabaseAdminStub { return fields; }) .setAdditionalPaths( - "/v1/{resource=projects/*/instances/*/backups/*}:getIamPolicy") + "/v1/{resource=projects/*/instances/*/backups/*}:getIamPolicy", + "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:getIamPolicy") .setQueryParamsExtractor( request -> { Map> fields = new HashMap<>(); @@ -465,6 +475,7 @@ public class HttpJsonDatabaseAdminStub extends DatabaseAdminStub { }) .setAdditionalPaths( 
"/v1/{resource=projects/*/instances/*/backups/*}:testIamPermissions", + "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:testIamPermissions", "/v1/{resource=projects/*/instances/*/databases/*/databaseRoles/*}:testIamPermissions") .setQueryParamsExtractor( request -> { @@ -868,6 +879,194 @@ public class HttpJsonDatabaseAdminStub extends DatabaseAdminStub { .build()) .build(); + private static final ApiMethodDescriptor + createBackupScheduleMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam( + fields, "backupScheduleId", request.getBackupScheduleId()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("backupSchedule", request.getBackupSchedule(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(BackupSchedule.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + getBackupScheduleMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + 
.setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(BackupSchedule.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + updateBackupScheduleMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule") + .setHttpMethod("PATCH") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{backupSchedule.name=projects/*/instances/*/databases/*/backupSchedules/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam( + fields, + "backupSchedule.name", + request.getBackupSchedule().getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "updateMask", request.getUpdateMask()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("backupSchedule", request.getBackupSchedule(), true)) + .build()) + .setResponseParser( + 
ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(BackupSchedule.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + deleteBackupScheduleMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule") + .setHttpMethod("DELETE") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Empty.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + + private static final ApiMethodDescriptor + listBackupSchedulesMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules") + .setHttpMethod("GET") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "parent", request.getParent()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + 
ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "pageSize", request.getPageSize()); + serializer.putQueryParam(fields, "pageToken", request.getPageToken()); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor(request -> null) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(ListBackupSchedulesResponse.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .build(); + private final UnaryCallable listDatabasesCallable; private final UnaryCallable listDatabasesPagedCallable; @@ -914,6 +1113,16 @@ public class HttpJsonDatabaseAdminStub extends DatabaseAdminStub { listDatabaseRolesCallable; private final UnaryCallable listDatabaseRolesPagedCallable; + private final UnaryCallable + createBackupScheduleCallable; + private final UnaryCallable getBackupScheduleCallable; + private final UnaryCallable + updateBackupScheduleCallable; + private final UnaryCallable deleteBackupScheduleCallable; + private final UnaryCallable + listBackupSchedulesCallable; + private final UnaryCallable + listBackupSchedulesPagedCallable; private final BackgroundResource backgroundResources; private final HttpJsonOperationsStub httpJsonOperationsStub; @@ -1265,6 +1474,68 @@ protected HttpJsonDatabaseAdminStub( return builder.build(); }) .build(); + HttpJsonCallSettings + createBackupScheduleTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(createBackupScheduleMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + getBackupScheduleTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(getBackupScheduleMethodDescriptor) + 
.setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + updateBackupScheduleTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(updateBackupScheduleMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add( + "backup_schedule.name", + String.valueOf(request.getBackupSchedule().getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings deleteBackupScheduleTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(deleteBackupScheduleMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); + HttpJsonCallSettings + listBackupSchedulesTransportSettings = + HttpJsonCallSettings + .newBuilder() + .setMethodDescriptor(listBackupSchedulesMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("parent", String.valueOf(request.getParent())); + return builder.build(); + }) + .build(); this.listDatabasesCallable = callableFactory.createUnaryCallable( @@ -1393,6 +1664,36 @@ protected HttpJsonDatabaseAdminStub( listDatabaseRolesTransportSettings, settings.listDatabaseRolesSettings(), clientContext); + this.createBackupScheduleCallable = + callableFactory.createUnaryCallable( + createBackupScheduleTransportSettings, + settings.createBackupScheduleSettings(), + clientContext); + this.getBackupScheduleCallable = + callableFactory.createUnaryCallable( + getBackupScheduleTransportSettings, + 
settings.getBackupScheduleSettings(), + clientContext); + this.updateBackupScheduleCallable = + callableFactory.createUnaryCallable( + updateBackupScheduleTransportSettings, + settings.updateBackupScheduleSettings(), + clientContext); + this.deleteBackupScheduleCallable = + callableFactory.createUnaryCallable( + deleteBackupScheduleTransportSettings, + settings.deleteBackupScheduleSettings(), + clientContext); + this.listBackupSchedulesCallable = + callableFactory.createUnaryCallable( + listBackupSchedulesTransportSettings, + settings.listBackupSchedulesSettings(), + clientContext); + this.listBackupSchedulesPagedCallable = + callableFactory.createPagedCallable( + listBackupSchedulesTransportSettings, + settings.listBackupSchedulesSettings(), + clientContext); this.backgroundResources = new BackgroundResourceAggregation(clientContext.getBackgroundResources()); @@ -1421,6 +1722,11 @@ public static List getMethodDescriptors() { methodDescriptors.add(listDatabaseOperationsMethodDescriptor); methodDescriptors.add(listBackupOperationsMethodDescriptor); methodDescriptors.add(listDatabaseRolesMethodDescriptor); + methodDescriptors.add(createBackupScheduleMethodDescriptor); + methodDescriptors.add(getBackupScheduleMethodDescriptor); + methodDescriptors.add(updateBackupScheduleMethodDescriptor); + methodDescriptors.add(deleteBackupScheduleMethodDescriptor); + methodDescriptors.add(listBackupSchedulesMethodDescriptor); return methodDescriptors; } @@ -1597,6 +1903,38 @@ public UnaryCallable restoreDatabaseCallable( return listDatabaseRolesPagedCallable; } + @Override + public UnaryCallable createBackupScheduleCallable() { + return createBackupScheduleCallable; + } + + @Override + public UnaryCallable getBackupScheduleCallable() { + return getBackupScheduleCallable; + } + + @Override + public UnaryCallable updateBackupScheduleCallable() { + return updateBackupScheduleCallable; + } + + @Override + public UnaryCallable deleteBackupScheduleCallable() { + return 
deleteBackupScheduleCallable; + } + + @Override + public UnaryCallable + listBackupSchedulesCallable() { + return listBackupSchedulesCallable; + } + + @Override + public UnaryCallable + listBackupSchedulesPagedCallable() { + return listBackupSchedulesPagedCallable; + } + @Override public final void close() { try { diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClient.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClient.java index 2e08751ff12..8a7e12b50e7 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClient.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClient.java @@ -68,6 +68,9 @@ import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; import com.google.spanner.admin.instance.v1.ListInstancesRequest; import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceMetadata; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.MoveInstanceResponse; import com.google.spanner.admin.instance.v1.ProjectName; import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; @@ -166,14 +169,14 @@ * * *

    CreateInstanceConfig - *

    Creates an instance config and begins preparing it to be used. The returned [long-running operation][google.longrunning.Operation] can be used to track the progress of preparing the new instance config. The instance config name is assigned by the caller. If the named instance config already exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. + *

    Creates an instance configuration and begins preparing it to be used. The returned [long-running operation][google.longrunning.Operation] can be used to track the progress of preparing the new instance configuration. The instance configuration name is assigned by the caller. If the named instance configuration already exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. *

    Immediately after the request returns: - *

    * The instance config is readable via the API, with all requested attributes. The instance config's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to true. Its state is `CREATING`. + *

    * The instance configuration is readable via the API, with all requested attributes. The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to true. Its state is `CREATING`. *

    While the operation is pending: - *

    * Cancelling the operation renders the instance config immediately unreadable via the API. * Except for deleting the creating resource, all other attempts to modify the instance config are rejected. + *

    * Cancelling the operation renders the instance configuration immediately unreadable via the API. * Except for deleting the creating resource, all other attempts to modify the instance configuration are rejected. *

    Upon completion of the returned operation: - *

    * Instances can be created using the instance configuration. * The instance config's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. Its state becomes `READY`. - *

    The returned [long-running operation][google.longrunning.Operation] will have a name of the format `<instance_config_name>/operations/<operation_id>` and can be used to track creation of the instance config. The [metadata][google.longrunning.Operation.metadata] field type is [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. The [response][google.longrunning.Operation.response] field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. + *

    * Instances can be created using the instance configuration. * The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. Its state becomes `READY`. + *

    The returned [long-running operation][google.longrunning.Operation] will have a name of the format `<instance_config_name>/operations/<operation_id>` and can be used to track creation of the instance configuration. The [metadata][google.longrunning.Operation.metadata] field type is [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. The [response][google.longrunning.Operation.response] field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. *

    Authorization requires `spanner.instanceConfigs.create` permission on the resource [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent]. * *

    Request object method variants only take one parameter, a request object, which must be constructed before the call.

    @@ -194,15 +197,15 @@ * * *

    UpdateInstanceConfig - *

    Updates an instance config. The returned [long-running operation][google.longrunning.Operation] can be used to track the progress of updating the instance. If the named instance config does not exist, returns `NOT_FOUND`. - *

    Only user managed configurations can be updated. + *

    Updates an instance configuration. The returned [long-running operation][google.longrunning.Operation] can be used to track the progress of updating the instance. If the named instance configuration does not exist, returns `NOT_FOUND`. + *

    Only user-managed configurations can be updated. *

    Immediately after the request returns: - *

    * The instance config's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to true. + *

    * The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to true. *

    While the operation is pending: - *

    * Cancelling the operation sets its metadata's [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time]. The operation is guaranteed to succeed at undoing all changes, after which point it terminates with a `CANCELLED` status. * All other attempts to modify the instance config are rejected. * Reading the instance config via the API continues to give the pre-request values. + *

    * Cancelling the operation sets its metadata's [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time]. The operation is guaranteed to succeed at undoing all changes, after which point it terminates with a `CANCELLED` status. * All other attempts to modify the instance configuration are rejected. * Reading the instance configuration via the API continues to give the pre-request values. *

    Upon completion of the returned operation: - *

    * Creating instances using the instance configuration uses the new values. * The instance config's new values are readable via the API. * The instance config's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. - *

    The returned [long-running operation][google.longrunning.Operation] will have a name of the format `<instance_config_name>/operations/<operation_id>` and can be used to track the instance config modification. The [metadata][google.longrunning.Operation.metadata] field type is [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. The [response][google.longrunning.Operation.response] field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. + *

    * Creating instances using the instance configuration uses the new values. * The new values of the instance configuration are readable via the API. * The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. + *

    The returned [long-running operation][google.longrunning.Operation] will have a name of the format `<instance_config_name>/operations/<operation_id>` and can be used to track the instance configuration modification. The [metadata][google.longrunning.Operation.metadata] field type is [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. The [response][google.longrunning.Operation.response] field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. *

    Authorization requires `spanner.instanceConfigs.update` permission on the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name]. * *

    Request object method variants only take one parameter, a request object, which must be constructed before the call.

    @@ -222,8 +225,8 @@ * * *

    DeleteInstanceConfig - *

    Deletes the instance config. Deletion is only allowed when no instances are using the configuration. If any instances are using the config, returns `FAILED_PRECONDITION`. - *

    Only user managed configurations can be deleted. + *

    Deletes the instance configuration. Deletion is only allowed when no instances are using the configuration. If any instances are using the configuration, returns `FAILED_PRECONDITION`. + *

    Only user-managed configurations can be deleted. *

    Authorization requires `spanner.instanceConfigs.delete` permission on the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name]. * *

    Request object method variants only take one parameter, a request object, which must be constructed before the call.

    @@ -243,7 +246,7 @@ * * *

    ListInstanceConfigOperations - *

    Lists the user-managed instance config [long-running operations][google.longrunning.Operation] in the given project. An instance config operation has a name of the form `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`. The long-running operation [metadata][google.longrunning.Operation.metadata] field type `metadata.type_url` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending operations. Operations returned are ordered by `operation.metadata.value.start_time` in descending order starting from the most recently started operation. + *

    Lists the user-managed instance configuration [long-running operations][google.longrunning.Operation] in the given project. An instance configuration operation has a name of the form `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`. The long-running operation [metadata][google.longrunning.Operation.metadata] field type `metadata.type_url` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending operations. Operations returned are ordered by `operation.metadata.value.start_time` in descending order starting from the most recently started operation. * *

    Request object method variants only take one parameter, a request object, which must be constructed before the call.

    *
      @@ -571,6 +574,32 @@ *
    * * + * + *

    MoveInstance + *

    Moves an instance to the target instance configuration. You can use the returned [long-running operation][google.longrunning.Operation] to track the progress of moving the instance. + *

    `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of the following criteria: + *

    * Is undergoing a move to a different instance configuration * Has backups * Has an ongoing update * Contains any CMEK-enabled databases * Is a free trial instance + *

    While the operation is pending: + *

    * All other attempts to modify the instance, including changes to its compute capacity, are rejected. * The following database and backup admin operations are rejected: + *

    * `DatabaseAdmin.CreateDatabase` * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is specified in the request.) * `DatabaseAdmin.RestoreDatabase` * `DatabaseAdmin.CreateBackup` * `DatabaseAdmin.CopyBackup` + *

    * Both the source and target instance configurations are subject to hourly compute and storage charges. * The instance might experience higher read-write latencies and a higher transaction abort rate. However, moving an instance doesn't cause any downtime. + *

    The returned [long-running operation][google.longrunning.Operation] has a name of the format `<instance_name>/operations/<operation_id>` and can be used to track the move instance operation. The [metadata][google.longrunning.Operation.metadata] field type is [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. The [response][google.longrunning.Operation.response] field type is [Instance][google.spanner.admin.instance.v1.Instance], if successful. Cancelling the operation sets its metadata's [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. Cancellation is not immediate because it involves moving any data previously moved to the target instance configuration back to the original instance configuration. You can use this operation to track the progress of the cancellation. Upon successful completion of the cancellation, the operation terminates with `CANCELLED` status. + *

    If not cancelled, upon completion of the returned operation: + *

    * The instance successfully moves to the target instance configuration. * You are billed for compute and storage in target instance configuration. + *

    Authorization requires the `spanner.instances.update` permission on the resource [instance][google.spanner.admin.instance.v1.Instance]. + *

    For more details, see [Move an instance](https://cloud.google.com/spanner/docs/move-instance). + * + *

    Request object method variants only take one parameter, a request object, which must be constructed before the call.

    + *
      + *
    • moveInstanceAsync(MoveInstanceRequest request) + *

    + *

    Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

    + *
      + *
    • moveInstanceOperationCallable() + *

    • moveInstanceCallable() + *

    + * + * * * *

    See the individual methods for example code. @@ -984,33 +1013,35 @@ public final UnaryCallable getInstance // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Creates an instance config and begins preparing it to be used. The returned [long-running - * operation][google.longrunning.Operation] can be used to track the progress of preparing the new - * instance config. The instance config name is assigned by the caller. If the named instance - * config already exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. + * Creates an instance configuration and begins preparing it to be used. The returned + * [long-running operation][google.longrunning.Operation] can be used to track the progress of + * preparing the new instance configuration. The instance configuration name is assigned by the + * caller. If the named instance configuration already exists, `CreateInstanceConfig` returns + * `ALREADY_EXISTS`. * *

    Immediately after the request returns: * - *

    * The instance config is readable via the API, with all requested attributes. The - * instance config's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] - * field is set to true. Its state is `CREATING`. + *

    * The instance configuration is readable via the API, with all requested attributes. The + * instance configuration's + * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to + * true. Its state is `CREATING`. * *

    While the operation is pending: * - *

    * Cancelling the operation renders the instance config immediately unreadable via the - * API. * Except for deleting the creating resource, all other attempts to modify the instance - * config are rejected. + *

    * Cancelling the operation renders the instance configuration immediately unreadable via + * the API. * Except for deleting the creating resource, all other attempts to modify the + * instance configuration are rejected. * *

    Upon completion of the returned operation: * - *

    * Instances can be created using the instance configuration. * The instance config's - * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. - * Its state becomes `READY`. + *

    * Instances can be created using the instance configuration. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. Its state becomes `READY`. * *

    The returned [long-running operation][google.longrunning.Operation] will have a name of the * format `<instance_config_name>/operations/<operation_id>` and can be used to track - * creation of the instance config. The [metadata][google.longrunning.Operation.metadata] field - * type is + * creation of the instance configuration. The [metadata][google.longrunning.Operation.metadata] + * field type is * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. * The [response][google.longrunning.Operation.response] field type is * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -1037,16 +1068,16 @@ public final UnaryCallable getInstance * } * } * - * @param parent Required. The name of the project in which to create the instance config. Values - * are of the form `projects/<project>`. + * @param parent Required. The name of the project in which to create the instance configuration. + * Values are of the form `projects/<project>`. * @param instanceConfig Required. The InstanceConfig proto of the configuration to create. * instance_config.name must be `<parent>/instanceConfigs/<instance_config_id>`. * instance_config.base_config must be a Google managed configuration name, e.g. * <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3. - * @param instanceConfigId Required. The ID of the instance config to create. Valid identifiers - * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in - * length. The `custom-` prefix is required to avoid name conflicts with Google managed - * configurations. + * @param instanceConfigId Required. The ID of the instance configuration to create. Valid + * identifiers are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64 + * characters in length. The `custom-` prefix is required to avoid name conflicts with + * Google-managed configurations. 
* @throws com.google.api.gax.rpc.ApiException if the remote call fails */ public final OperationFuture @@ -1063,33 +1094,35 @@ public final UnaryCallable getInstance // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Creates an instance config and begins preparing it to be used. The returned [long-running - * operation][google.longrunning.Operation] can be used to track the progress of preparing the new - * instance config. The instance config name is assigned by the caller. If the named instance - * config already exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. + * Creates an instance configuration and begins preparing it to be used. The returned + * [long-running operation][google.longrunning.Operation] can be used to track the progress of + * preparing the new instance configuration. The instance configuration name is assigned by the + * caller. If the named instance configuration already exists, `CreateInstanceConfig` returns + * `ALREADY_EXISTS`. * *

    Immediately after the request returns: * - *

    * The instance config is readable via the API, with all requested attributes. The - * instance config's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] - * field is set to true. Its state is `CREATING`. + *

    * The instance configuration is readable via the API, with all requested attributes. The + * instance configuration's + * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to + * true. Its state is `CREATING`. * *

    While the operation is pending: * - *

    * Cancelling the operation renders the instance config immediately unreadable via the - * API. * Except for deleting the creating resource, all other attempts to modify the instance - * config are rejected. + *

    * Cancelling the operation renders the instance configuration immediately unreadable via + * the API. * Except for deleting the creating resource, all other attempts to modify the + * instance configuration are rejected. * *

    Upon completion of the returned operation: * - *

    * Instances can be created using the instance configuration. * The instance config's - * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. - * Its state becomes `READY`. + *

    * Instances can be created using the instance configuration. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. Its state becomes `READY`. * *

    The returned [long-running operation][google.longrunning.Operation] will have a name of the * format `<instance_config_name>/operations/<operation_id>` and can be used to track - * creation of the instance config. The [metadata][google.longrunning.Operation.metadata] field - * type is + * creation of the instance configuration. The [metadata][google.longrunning.Operation.metadata] + * field type is * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. * The [response][google.longrunning.Operation.response] field type is * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -1116,16 +1149,16 @@ public final UnaryCallable getInstance * } * } * - * @param parent Required. The name of the project in which to create the instance config. Values - * are of the form `projects/<project>`. + * @param parent Required. The name of the project in which to create the instance configuration. + * Values are of the form `projects/<project>`. * @param instanceConfig Required. The InstanceConfig proto of the configuration to create. * instance_config.name must be `<parent>/instanceConfigs/<instance_config_id>`. * instance_config.base_config must be a Google managed configuration name, e.g. * <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3. - * @param instanceConfigId Required. The ID of the instance config to create. Valid identifiers - * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in - * length. The `custom-` prefix is required to avoid name conflicts with Google managed - * configurations. + * @param instanceConfigId Required. The ID of the instance configuration to create. Valid + * identifiers are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64 + * characters in length. The `custom-` prefix is required to avoid name conflicts with + * Google-managed configurations. 
* @throws com.google.api.gax.rpc.ApiException if the remote call fails */ public final OperationFuture @@ -1142,33 +1175,35 @@ public final UnaryCallable getInstance // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Creates an instance config and begins preparing it to be used. The returned [long-running - * operation][google.longrunning.Operation] can be used to track the progress of preparing the new - * instance config. The instance config name is assigned by the caller. If the named instance - * config already exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. + * Creates an instance configuration and begins preparing it to be used. The returned + * [long-running operation][google.longrunning.Operation] can be used to track the progress of + * preparing the new instance configuration. The instance configuration name is assigned by the + * caller. If the named instance configuration already exists, `CreateInstanceConfig` returns + * `ALREADY_EXISTS`. * *

    Immediately after the request returns: * - *

    * The instance config is readable via the API, with all requested attributes. The - * instance config's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] - * field is set to true. Its state is `CREATING`. + *

    * The instance configuration is readable via the API, with all requested attributes. The + * instance configuration's + * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to + * true. Its state is `CREATING`. * *

    While the operation is pending: * - *

    * Cancelling the operation renders the instance config immediately unreadable via the - * API. * Except for deleting the creating resource, all other attempts to modify the instance - * config are rejected. + *

    * Cancelling the operation renders the instance configuration immediately unreadable via + * the API. * Except for deleting the creating resource, all other attempts to modify the + * instance configuration are rejected. * *

    Upon completion of the returned operation: * - *

    * Instances can be created using the instance configuration. * The instance config's - * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. - * Its state becomes `READY`. + *

    * Instances can be created using the instance configuration. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. Its state becomes `READY`. * *

    The returned [long-running operation][google.longrunning.Operation] will have a name of the * format `<instance_config_name>/operations/<operation_id>` and can be used to track - * creation of the instance config. The [metadata][google.longrunning.Operation.metadata] field - * type is + * creation of the instance configuration. The [metadata][google.longrunning.Operation.metadata] + * field type is * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. * The [response][google.longrunning.Operation.response] field type is * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -1206,33 +1241,35 @@ public final UnaryCallable getInstance // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Creates an instance config and begins preparing it to be used. The returned [long-running - * operation][google.longrunning.Operation] can be used to track the progress of preparing the new - * instance config. The instance config name is assigned by the caller. If the named instance - * config already exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. + * Creates an instance configuration and begins preparing it to be used. The returned + * [long-running operation][google.longrunning.Operation] can be used to track the progress of + * preparing the new instance configuration. The instance configuration name is assigned by the + * caller. If the named instance configuration already exists, `CreateInstanceConfig` returns + * `ALREADY_EXISTS`. * *

    Immediately after the request returns: * - *

    * The instance config is readable via the API, with all requested attributes. The - * instance config's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] - * field is set to true. Its state is `CREATING`. + *

    * The instance configuration is readable via the API, with all requested attributes. The + * instance configuration's + * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to + * true. Its state is `CREATING`. * *

    While the operation is pending: * - *

    * Cancelling the operation renders the instance config immediately unreadable via the - * API. * Except for deleting the creating resource, all other attempts to modify the instance - * config are rejected. + *

    * Cancelling the operation renders the instance configuration immediately unreadable via + * the API. * Except for deleting the creating resource, all other attempts to modify the + * instance configuration are rejected. * *

    Upon completion of the returned operation: * - *

    * Instances can be created using the instance configuration. * The instance config's - * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. - * Its state becomes `READY`. + *

    * Instances can be created using the instance configuration. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. Its state becomes `READY`. * *

    The returned [long-running operation][google.longrunning.Operation] will have a name of the * format `<instance_config_name>/operations/<operation_id>` and can be used to track - * creation of the instance config. The [metadata][google.longrunning.Operation.metadata] field - * type is + * creation of the instance configuration. The [metadata][google.longrunning.Operation.metadata] + * field type is * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. * The [response][google.longrunning.Operation.response] field type is * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -1271,33 +1308,35 @@ public final UnaryCallable getInstance // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Creates an instance config and begins preparing it to be used. The returned [long-running - * operation][google.longrunning.Operation] can be used to track the progress of preparing the new - * instance config. The instance config name is assigned by the caller. If the named instance - * config already exists, `CreateInstanceConfig` returns `ALREADY_EXISTS`. + * Creates an instance configuration and begins preparing it to be used. The returned + * [long-running operation][google.longrunning.Operation] can be used to track the progress of + * preparing the new instance configuration. The instance configuration name is assigned by the + * caller. If the named instance configuration already exists, `CreateInstanceConfig` returns + * `ALREADY_EXISTS`. * *

    Immediately after the request returns: * - *

    * The instance config is readable via the API, with all requested attributes. The - * instance config's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] - * field is set to true. Its state is `CREATING`. + *

    * The instance configuration is readable via the API, with all requested attributes. The + * instance configuration's + * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to + * true. Its state is `CREATING`. * *

    While the operation is pending: * - *

    * Cancelling the operation renders the instance config immediately unreadable via the - * API. * Except for deleting the creating resource, all other attempts to modify the instance - * config are rejected. + *

    * Cancelling the operation renders the instance configuration immediately unreadable via + * the API. * Except for deleting the creating resource, all other attempts to modify the + * instance configuration are rejected. * *

    Upon completion of the returned operation: * - *

    * Instances can be created using the instance configuration. * The instance config's - * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. - * Its state becomes `READY`. + *

    * Instances can be created using the instance configuration. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. Its state becomes `READY`. * *

    The returned [long-running operation][google.longrunning.Operation] will have a name of the * format `<instance_config_name>/operations/<operation_id>` and can be used to track - * creation of the instance config. The [metadata][google.longrunning.Operation.metadata] field - * type is + * creation of the instance configuration. The [metadata][google.longrunning.Operation.metadata] + * field type is * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. * The [response][google.longrunning.Operation.response] field type is * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -1335,15 +1374,15 @@ public final UnaryCallable getInstance // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Updates an instance config. The returned [long-running operation][google.longrunning.Operation] - * can be used to track the progress of updating the instance. If the named instance config does - * not exist, returns `NOT_FOUND`. + * Updates an instance configuration. The returned [long-running + * operation][google.longrunning.Operation] can be used to track the progress of updating the + * instance. If the named instance configuration does not exist, returns `NOT_FOUND`. * - *

    Only user managed configurations can be updated. + *

    Only user-managed configurations can be updated. * *

    Immediately after the request returns: * - *

    * The instance config's + *

    * The instance configuration's * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to * true. * @@ -1352,19 +1391,21 @@ public final UnaryCallable getInstance *

    * Cancelling the operation sets its metadata's * [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time]. The * operation is guaranteed to succeed at undoing all changes, after which point it terminates with - * a `CANCELLED` status. * All other attempts to modify the instance config are rejected. - * * Reading the instance config via the API continues to give the pre-request values. + * a `CANCELLED` status. * All other attempts to modify the instance configuration are + * rejected. * Reading the instance configuration via the API continues to give the + * pre-request values. * *

    Upon completion of the returned operation: * - *

    * Creating instances using the instance configuration uses the new values. * The - * instance config's new values are readable via the API. * The instance config's - * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. + *

    * Creating instances using the instance configuration uses the new values. * The new + * values of the instance configuration are readable via the API. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. * *

    The returned [long-running operation][google.longrunning.Operation] will have a name of the * format `<instance_config_name>/operations/<operation_id>` and can be used to track - * the instance config modification. The [metadata][google.longrunning.Operation.metadata] field - * type is + * the instance configuration modification. The [metadata][google.longrunning.Operation.metadata] + * field type is * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. * The [response][google.longrunning.Operation.response] field type is * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -1388,8 +1429,8 @@ public final UnaryCallable getInstance * } * } * - * @param instanceConfig Required. The user instance config to update, which must always include - * the instance config name. Otherwise, only fields mentioned in + * @param instanceConfig Required. The user instance configuration to update, which must always + * include the instance configuration name. Otherwise, only fields mentioned in * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask] * need be included. To prevent conflicts of concurrent updates, * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can be used. @@ -1413,15 +1454,15 @@ public final UnaryCallable getInstance // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Updates an instance config. The returned [long-running operation][google.longrunning.Operation] - * can be used to track the progress of updating the instance. If the named instance config does - * not exist, returns `NOT_FOUND`. + * Updates an instance configuration. The returned [long-running + * operation][google.longrunning.Operation] can be used to track the progress of updating the + * instance. If the named instance configuration does not exist, returns `NOT_FOUND`. * - *

    Only user managed configurations can be updated. + *

    Only user-managed configurations can be updated. * *

    Immediately after the request returns: * - *

    * The instance config's + *

    * The instance configuration's * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to * true. * @@ -1430,19 +1471,21 @@ public final UnaryCallable getInstance *

    * Cancelling the operation sets its metadata's * [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time]. The * operation is guaranteed to succeed at undoing all changes, after which point it terminates with - * a `CANCELLED` status. * All other attempts to modify the instance config are rejected. - * * Reading the instance config via the API continues to give the pre-request values. + * a `CANCELLED` status. * All other attempts to modify the instance configuration are + * rejected. * Reading the instance configuration via the API continues to give the + * pre-request values. * *

    Upon completion of the returned operation: * - *

    * Creating instances using the instance configuration uses the new values. * The - * instance config's new values are readable via the API. * The instance config's - * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. + *

    * Creating instances using the instance configuration uses the new values. * The new + * values of the instance configuration are readable via the API. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. * *

    The returned [long-running operation][google.longrunning.Operation] will have a name of the * format `<instance_config_name>/operations/<operation_id>` and can be used to track - * the instance config modification. The [metadata][google.longrunning.Operation.metadata] field - * type is + * the instance configuration modification. The [metadata][google.longrunning.Operation.metadata] + * field type is * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. * The [response][google.longrunning.Operation.response] field type is * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -1479,15 +1522,15 @@ public final UnaryCallable getInstance // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Updates an instance config. The returned [long-running operation][google.longrunning.Operation] - * can be used to track the progress of updating the instance. If the named instance config does - * not exist, returns `NOT_FOUND`. + * Updates an instance configuration. The returned [long-running + * operation][google.longrunning.Operation] can be used to track the progress of updating the + * instance. If the named instance configuration does not exist, returns `NOT_FOUND`. * - *

    Only user managed configurations can be updated. + *

    Only user-managed configurations can be updated. * *

    Immediately after the request returns: * - *

    * The instance config's + *

    * The instance configuration's * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to * true. * @@ -1496,19 +1539,21 @@ public final UnaryCallable getInstance *

    * Cancelling the operation sets its metadata's * [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time]. The * operation is guaranteed to succeed at undoing all changes, after which point it terminates with - * a `CANCELLED` status. * All other attempts to modify the instance config are rejected. - * * Reading the instance config via the API continues to give the pre-request values. + * a `CANCELLED` status. * All other attempts to modify the instance configuration are + * rejected. * Reading the instance configuration via the API continues to give the + * pre-request values. * *

    Upon completion of the returned operation: * - *

    * Creating instances using the instance configuration uses the new values. * The - * instance config's new values are readable via the API. * The instance config's - * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. + *

    * Creating instances using the instance configuration uses the new values. * The new + * values of the instance configuration are readable via the API. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. * *

    The returned [long-running operation][google.longrunning.Operation] will have a name of the * format `<instance_config_name>/operations/<operation_id>` and can be used to track - * the instance config modification. The [metadata][google.longrunning.Operation.metadata] field - * type is + * the instance configuration modification. The [metadata][google.longrunning.Operation.metadata] + * field type is * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. * The [response][google.longrunning.Operation.response] field type is * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -1546,15 +1591,15 @@ public final UnaryCallable getInstance // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Updates an instance config. The returned [long-running operation][google.longrunning.Operation] - * can be used to track the progress of updating the instance. If the named instance config does - * not exist, returns `NOT_FOUND`. + * Updates an instance configuration. The returned [long-running + * operation][google.longrunning.Operation] can be used to track the progress of updating the + * instance. If the named instance configuration does not exist, returns `NOT_FOUND`. * - *

    Only user managed configurations can be updated. + *

    Only user-managed configurations can be updated. * *

    Immediately after the request returns: * - *

    * The instance config's + *

    * The instance configuration's * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to * true. * @@ -1563,19 +1608,21 @@ public final UnaryCallable getInstance *

    * Cancelling the operation sets its metadata's * [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time]. The * operation is guaranteed to succeed at undoing all changes, after which point it terminates with - * a `CANCELLED` status. * All other attempts to modify the instance config are rejected. - * * Reading the instance config via the API continues to give the pre-request values. + * a `CANCELLED` status. * All other attempts to modify the instance configuration are + * rejected. * Reading the instance configuration via the API continues to give the + * pre-request values. * *

    Upon completion of the returned operation: * - *

    * Creating instances using the instance configuration uses the new values. * The - * instance config's new values are readable via the API. * The instance config's - * [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. + *

    * Creating instances using the instance configuration uses the new values. * The new + * values of the instance configuration are readable via the API. * The instance + * configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] + * field becomes false. * *

    The returned [long-running operation][google.longrunning.Operation] will have a name of the * format `<instance_config_name>/operations/<operation_id>` and can be used to track - * the instance config modification. The [metadata][google.longrunning.Operation.metadata] field - * type is + * the instance configuration modification. The [metadata][google.longrunning.Operation.metadata] + * field type is * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. * The [response][google.longrunning.Operation.response] field type is * [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -1612,10 +1659,10 @@ public final UnaryCallable getInstance // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Deletes the instance config. Deletion is only allowed when no instances are using the - * configuration. If any instances are using the config, returns `FAILED_PRECONDITION`. + * Deletes the instance configuration. Deletion is only allowed when no instances are using the + * configuration. If any instances are using the configuration, returns `FAILED_PRECONDITION`. * - *

    Only user managed configurations can be deleted. + *

    Only user-managed configurations can be deleted. * *

    Authorization requires `spanner.instanceConfigs.delete` permission on the resource * [name][google.spanner.admin.instance.v1.InstanceConfig.name]. @@ -1648,10 +1695,10 @@ public final void deleteInstanceConfig(InstanceConfigName name) { // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Deletes the instance config. Deletion is only allowed when no instances are using the - * configuration. If any instances are using the config, returns `FAILED_PRECONDITION`. + * Deletes the instance configuration. Deletion is only allowed when no instances are using the + * configuration. If any instances are using the configuration, returns `FAILED_PRECONDITION`. * - *

    Only user managed configurations can be deleted. + *

    Only user-managed configurations can be deleted. * *

    Authorization requires `spanner.instanceConfigs.delete` permission on the resource * [name][google.spanner.admin.instance.v1.InstanceConfig.name]. @@ -1682,10 +1729,10 @@ public final void deleteInstanceConfig(String name) { // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Deletes the instance config. Deletion is only allowed when no instances are using the - * configuration. If any instances are using the config, returns `FAILED_PRECONDITION`. + * Deletes the instance configuration. Deletion is only allowed when no instances are using the + * configuration. If any instances are using the configuration, returns `FAILED_PRECONDITION`. * - *

    Only user managed configurations can be deleted. + *

    Only user-managed configurations can be deleted. * *

    Authorization requires `spanner.instanceConfigs.delete` permission on the resource * [name][google.spanner.admin.instance.v1.InstanceConfig.name]. @@ -1718,10 +1765,10 @@ public final void deleteInstanceConfig(DeleteInstanceConfigRequest request) { // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Deletes the instance config. Deletion is only allowed when no instances are using the - * configuration. If any instances are using the config, returns `FAILED_PRECONDITION`. + * Deletes the instance configuration. Deletion is only allowed when no instances are using the + * configuration. If any instances are using the configuration, returns `FAILED_PRECONDITION`. * - *

    Only user managed configurations can be deleted. + *

    Only user-managed configurations can be deleted. * *

    Authorization requires `spanner.instanceConfigs.delete` permission on the resource * [name][google.spanner.admin.instance.v1.InstanceConfig.name]. @@ -1754,8 +1801,9 @@ public final UnaryCallable deleteInstanceCon // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Lists the user-managed instance config [long-running operations][google.longrunning.Operation] - * in the given project. An instance config operation has a name of the form + * Lists the user-managed instance configuration [long-running + * operations][google.longrunning.Operation] in the given project. An instance configuration + * operation has a name of the form * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`. * The long-running operation [metadata][google.longrunning.Operation.metadata] field type * `metadata.type_url` describes the type of the metadata. Operations returned include those that @@ -1780,8 +1828,8 @@ public final UnaryCallable deleteInstanceCon * } * } * - * @param parent Required. The project of the instance config operations. Values are of the form - * `projects/<project>`. + * @param parent Required. The project of the instance configuration operations. Values are of the + * form `projects/<project>`. * @throws com.google.api.gax.rpc.ApiException if the remote call fails */ public final ListInstanceConfigOperationsPagedResponse listInstanceConfigOperations( @@ -1795,8 +1843,9 @@ public final ListInstanceConfigOperationsPagedResponse listInstanceConfigOperati // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Lists the user-managed instance config [long-running operations][google.longrunning.Operation] - * in the given project. An instance config operation has a name of the form + * Lists the user-managed instance configuration [long-running + * operations][google.longrunning.Operation] in the given project. 
An instance configuration + * operation has a name of the form * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`. * The long-running operation [metadata][google.longrunning.Operation.metadata] field type * `metadata.type_url` describes the type of the metadata. Operations returned include those that @@ -1821,8 +1870,8 @@ public final ListInstanceConfigOperationsPagedResponse listInstanceConfigOperati * } * } * - * @param parent Required. The project of the instance config operations. Values are of the form - * `projects/<project>`. + * @param parent Required. The project of the instance configuration operations. Values are of the + * form `projects/<project>`. * @throws com.google.api.gax.rpc.ApiException if the remote call fails */ public final ListInstanceConfigOperationsPagedResponse listInstanceConfigOperations( @@ -1834,8 +1883,9 @@ public final ListInstanceConfigOperationsPagedResponse listInstanceConfigOperati // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Lists the user-managed instance config [long-running operations][google.longrunning.Operation] - * in the given project. An instance config operation has a name of the form + * Lists the user-managed instance configuration [long-running + * operations][google.longrunning.Operation] in the given project. An instance configuration + * operation has a name of the form * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`. * The long-running operation [metadata][google.longrunning.Operation.metadata] field type * `metadata.type_url` describes the type of the metadata. Operations returned include those that @@ -1876,8 +1926,9 @@ public final ListInstanceConfigOperationsPagedResponse listInstanceConfigOperati // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Lists the user-managed instance config [long-running operations][google.longrunning.Operation] - * in the given project. 
An instance config operation has a name of the form + * Lists the user-managed instance configuration [long-running + * operations][google.longrunning.Operation] in the given project. An instance configuration + * operation has a name of the form * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`. * The long-running operation [metadata][google.longrunning.Operation.metadata] field type * `metadata.type_url` describes the type of the metadata. Operations returned include those that @@ -1918,8 +1969,9 @@ public final ListInstanceConfigOperationsPagedResponse listInstanceConfigOperati // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Lists the user-managed instance config [long-running operations][google.longrunning.Operation] - * in the given project. An instance config operation has a name of the form + * Lists the user-managed instance configuration [long-running + * operations][google.longrunning.Operation] in the given project. An instance configuration + * operation has a name of the form * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`. * The long-running operation [metadata][google.longrunning.Operation.metadata] field type * `metadata.type_url` describes the type of the metadata. Operations returned include those that @@ -4642,6 +4694,226 @@ public final ListInstancePartitionOperationsPagedResponse listInstancePartitionO return stub.listInstancePartitionOperationsCallable(); } + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Moves an instance to the target instance configuration. You can use the returned [long-running + * operation][google.longrunning.Operation] to track the progress of moving the instance. + * + *

    `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of the following + * criteria: + * + *

    * Is undergoing a move to a different instance configuration * Has backups * Has + * an ongoing update * Contains any CMEK-enabled databases * Is a free trial instance + * + *

    While the operation is pending: + * + *

    * All other attempts to modify the instance, including changes to its compute capacity, + * are rejected. * The following database and backup admin operations are rejected: + * + *

    * `DatabaseAdmin.CreateDatabase` * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if + * default_leader is specified in the request.) * `DatabaseAdmin.RestoreDatabase` * + * `DatabaseAdmin.CreateBackup` * `DatabaseAdmin.CopyBackup` + * + *

    * Both the source and target instance configurations are subject to hourly compute and + * storage charges. * The instance might experience higher read-write latencies and a higher + * transaction abort rate. However, moving an instance doesn't cause any downtime. + * + *

    The returned [long-running operation][google.longrunning.Operation] has a name of the format + * `<instance_name>/operations/<operation_id>` and can be used to track the move + * instance operation. The [metadata][google.longrunning.Operation.metadata] field type is + * [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Instance][google.spanner.admin.instance.v1.Instance], if successful. Cancelling the operation + * sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. Cancellation + * is not immediate because it involves moving any data previously moved to the target instance + * configuration back to the original instance configuration. You can use this operation to track + * the progress of the cancellation. Upon successful completion of the cancellation, the operation + * terminates with `CANCELLED` status. + * + *

    If not cancelled, upon completion of the returned operation: + * + *

    * The instance successfully moves to the target instance configuration. * You are + * billed for compute and storage in target instance configuration. + * + *

    Authorization requires the `spanner.instances.update` permission on the resource + * [instance][google.spanner.admin.instance.v1.Instance]. + * + *

    For more details, see [Move an + * instance](https://cloud.google.com/spanner/docs/move-instance). + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
    +   *   MoveInstanceRequest request =
    +   *       MoveInstanceRequest.newBuilder()
    +   *           .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
    +   *           .setTargetConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString())
    +   *           .build();
    +   *   MoveInstanceResponse response = instanceAdminClient.moveInstanceAsync(request).get();
    +   * }
    +   * }
    + * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final OperationFuture moveInstanceAsync( + MoveInstanceRequest request) { + return moveInstanceOperationCallable().futureCall(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Moves an instance to the target instance configuration. You can use the returned [long-running + * operation][google.longrunning.Operation] to track the progress of moving the instance. + * + *

    `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of the following + * criteria: + * + *

    * Is undergoing a move to a different instance configuration * Has backups * Has + * an ongoing update * Contains any CMEK-enabled databases * Is a free trial instance + * + *

    While the operation is pending: + * + *

    * All other attempts to modify the instance, including changes to its compute capacity, + * are rejected. * The following database and backup admin operations are rejected: + * + *

    * `DatabaseAdmin.CreateDatabase` * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if + * default_leader is specified in the request.) * `DatabaseAdmin.RestoreDatabase` * + * `DatabaseAdmin.CreateBackup` * `DatabaseAdmin.CopyBackup` + * + *

    * Both the source and target instance configurations are subject to hourly compute and + * storage charges. * The instance might experience higher read-write latencies and a higher + * transaction abort rate. However, moving an instance doesn't cause any downtime. + * + *

    The returned [long-running operation][google.longrunning.Operation] has a name of the format + * `<instance_name>/operations/<operation_id>` and can be used to track the move + * instance operation. The [metadata][google.longrunning.Operation.metadata] field type is + * [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Instance][google.spanner.admin.instance.v1.Instance], if successful. Cancelling the operation + * sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. Cancellation + * is not immediate because it involves moving any data previously moved to the target instance + * configuration back to the original instance configuration. You can use this operation to track + * the progress of the cancellation. Upon successful completion of the cancellation, the operation + * terminates with `CANCELLED` status. + * + *

    If not cancelled, upon completion of the returned operation: + * + *

    * The instance successfully moves to the target instance configuration. * You are + * billed for compute and storage in target instance configuration. + * + *

    Authorization requires the `spanner.instances.update` permission on the resource + * [instance][google.spanner.admin.instance.v1.Instance]. + * + *

    For more details, see [Move an + * instance](https://cloud.google.com/spanner/docs/move-instance). + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
    +   *   MoveInstanceRequest request =
    +   *       MoveInstanceRequest.newBuilder()
    +   *           .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
    +   *           .setTargetConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString())
    +   *           .build();
    +   *   OperationFuture future =
    +   *       instanceAdminClient.moveInstanceOperationCallable().futureCall(request);
    +   *   // Do something.
    +   *   MoveInstanceResponse response = future.get();
    +   * }
    +   * }
    + */ + public final OperationCallable + moveInstanceOperationCallable() { + return stub.moveInstanceOperationCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * Moves an instance to the target instance configuration. You can use the returned [long-running + * operation][google.longrunning.Operation] to track the progress of moving the instance. + * + *

    `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of the following + * criteria: + * + *

    * Is undergoing a move to a different instance configuration * Has backups * Has + * an ongoing update * Contains any CMEK-enabled databases * Is a free trial instance + * + *

    While the operation is pending: + * + *

    * All other attempts to modify the instance, including changes to its compute capacity, + * are rejected. * The following database and backup admin operations are rejected: + * + *

    * `DatabaseAdmin.CreateDatabase` * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if + * default_leader is specified in the request.) * `DatabaseAdmin.RestoreDatabase` * + * `DatabaseAdmin.CreateBackup` * `DatabaseAdmin.CopyBackup` + * + *

    * Both the source and target instance configurations are subject to hourly compute and + * storage charges. * The instance might experience higher read-write latencies and a higher + * transaction abort rate. However, moving an instance doesn't cause any downtime. + * + *

    The returned [long-running operation][google.longrunning.Operation] has a name of the format + * `<instance_name>/operations/<operation_id>` and can be used to track the move + * instance operation. The [metadata][google.longrunning.Operation.metadata] field type is + * [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. The + * [response][google.longrunning.Operation.response] field type is + * [Instance][google.spanner.admin.instance.v1.Instance], if successful. Cancelling the operation + * sets its metadata's + * [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. Cancellation + * is not immediate because it involves moving any data previously moved to the target instance + * configuration back to the original instance configuration. You can use this operation to track + * the progress of the cancellation. Upon successful completion of the cancellation, the operation + * terminates with `CANCELLED` status. + * + *

    If not cancelled, upon completion of the returned operation: + * + *

    * The instance successfully moves to the target instance configuration. * You are + * billed for compute and storage in target instance configuration. + * + *

    Authorization requires the `spanner.instances.update` permission on the resource + * [instance][google.spanner.admin.instance.v1.Instance]. + * + *

    For more details, see [Move an + * instance](https://cloud.google.com/spanner/docs/move-instance). + * + *

    Sample code: + * + *

    {@code
    +   * // This snippet has been automatically generated and should be regarded as a code template only.
    +   * // It will require modifications to work:
    +   * // - It may require correct/in-range values for request initialization.
    +   * // - It may require specifying regional endpoints when creating the service client as shown in
    +   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    +   * try (InstanceAdminClient instanceAdminClient = InstanceAdminClient.create()) {
    +   *   MoveInstanceRequest request =
    +   *       MoveInstanceRequest.newBuilder()
    +   *           .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString())
    +   *           .setTargetConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString())
    +   *           .build();
    +   *   ApiFuture future = instanceAdminClient.moveInstanceCallable().futureCall(request);
    +   *   // Do something.
    +   *   Operation response = future.get();
    +   * }
    +   * }
    + */ + public final UnaryCallable moveInstanceCallable() { + return stub.moveInstanceCallable(); + } + @Override public final void close() { stub.close(); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminSettings.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminSettings.java index cad1b2d7e9c..9fde368566b 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminSettings.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminSettings.java @@ -68,6 +68,9 @@ import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; import com.google.spanner.admin.instance.v1.ListInstancesRequest; import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceMetadata; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.MoveInstanceResponse; import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; @@ -93,7 +96,9 @@ *

    The builder of this class is recursive, so contained classes are themselves builders. When * build() is called, the tree of builders is called to create the complete settings object. * - *

    For example, to set the total timeout of getInstanceConfig to 30 seconds: + *

    For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of getInstanceConfig: * *

    {@code
      * // This snippet has been automatically generated and should be regarded as a code template only.
    @@ -109,10 +114,46 @@
      *             .getInstanceConfigSettings()
      *             .getRetrySettings()
      *             .toBuilder()
    - *             .setTotalTimeout(Duration.ofSeconds(30))
    + *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
    + *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
    + *             .setMaxAttempts(5)
    + *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
    + *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
    + *             .setRetryDelayMultiplier(1.3)
    + *             .setRpcTimeoutMultiplier(1.5)
    + *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
      *             .build());
      * InstanceAdminSettings instanceAdminSettings = instanceAdminSettingsBuilder.build();
      * }
    + * + * Please refer to the [Client Side Retry + * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for + * additional support in setting retries. + * + *

    To configure the RetrySettings of a Long Running Operation method, create an + * OperationTimedPollAlgorithm object and update the RPC's polling algorithm. For example, to + * configure the RetrySettings for createInstanceConfig: + * + *

    {@code
    + * // This snippet has been automatically generated and should be regarded as a code template only.
    + * // It will require modifications to work:
    + * // - It may require correct/in-range values for request initialization.
    + * // - It may require specifying regional endpoints when creating the service client as shown in
    + * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    + * InstanceAdminSettings.Builder instanceAdminSettingsBuilder = InstanceAdminSettings.newBuilder();
    + * TimedRetryAlgorithm timedRetryAlgorithm =
    + *     OperationalTimedPollAlgorithm.create(
    + *         RetrySettings.newBuilder()
    + *             .setInitialRetryDelayDuration(Duration.ofMillis(500))
    + *             .setRetryDelayMultiplier(1.5)
    + *             .setMaxRetryDelay(Duration.ofMillis(5000))
    + *             .setTotalTimeoutDuration(Duration.ofHours(24))
    + *             .build());
    + * instanceAdminSettingsBuilder
    + *     .createClusterOperationSettings()
    + *     .setPollingAlgorithm(timedRetryAlgorithm)
    + *     .build();
    + * }
    */ @Generated("by gapic-generator-java") public class InstanceAdminSettings extends ClientSettings { @@ -280,6 +321,17 @@ public UnaryCallSettings getIamPolicySettings() { .listInstancePartitionOperationsSettings(); } + /** Returns the object with the settings used for calls to moveInstance. */ + public UnaryCallSettings moveInstanceSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).moveInstanceSettings(); + } + + /** Returns the object with the settings used for calls to moveInstance. */ + public OperationCallSettings + moveInstanceOperationSettings() { + return ((InstanceAdminStubSettings) getStubSettings()).moveInstanceOperationSettings(); + } + public static final InstanceAdminSettings create(InstanceAdminStubSettings stub) throws IOException { return new InstanceAdminSettings.Builder(stub.toBuilder()).build(); @@ -559,6 +611,18 @@ public UnaryCallSettings.Builder getIamPolicySettin return getStubSettingsBuilder().listInstancePartitionOperationsSettings(); } + /** Returns the builder for the settings used for calls to moveInstance. */ + public UnaryCallSettings.Builder moveInstanceSettings() { + return getStubSettingsBuilder().moveInstanceSettings(); + } + + /** Returns the builder for the settings used for calls to moveInstance. 
*/ + public OperationCallSettings.Builder< + MoveInstanceRequest, MoveInstanceResponse, MoveInstanceMetadata> + moveInstanceOperationSettings() { + return getStubSettingsBuilder().moveInstanceOperationSettings(); + } + @Override public InstanceAdminSettings build() throws IOException { return new InstanceAdminSettings(this); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/gapic_metadata.json b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/gapic_metadata.json index 594176d4e7d..1500bd3742e 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/gapic_metadata.json +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/gapic_metadata.json @@ -55,6 +55,9 @@ "ListInstances": { "methods": ["listInstances", "listInstances", "listInstances", "listInstancesPagedCallable", "listInstancesCallable"] }, + "MoveInstance": { + "methods": ["moveInstanceAsync", "moveInstanceOperationCallable", "moveInstanceCallable"] + }, "SetIamPolicy": { "methods": ["setIamPolicy", "setIamPolicy", "setIamPolicy", "setIamPolicyCallable"] }, diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/GrpcInstanceAdminStub.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/GrpcInstanceAdminStub.java index 3369a1642b6..92ca5c4bf39 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/GrpcInstanceAdminStub.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/GrpcInstanceAdminStub.java @@ -63,6 +63,9 @@ import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; import com.google.spanner.admin.instance.v1.ListInstancesRequest; import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceMetadata; +import 
com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.MoveInstanceResponse; import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; @@ -305,6 +308,15 @@ public class GrpcInstanceAdminStub extends InstanceAdminStub { ListInstancePartitionOperationsResponse.getDefaultInstance())) .build(); + private static final MethodDescriptor + moveInstanceMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/MoveInstance") + .setRequestMarshaller(ProtoUtils.marshaller(MoveInstanceRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .build(); + private final UnaryCallable listInstanceConfigsCallable; private final UnaryCallable @@ -364,6 +376,9 @@ public class GrpcInstanceAdminStub extends InstanceAdminStub { private final UnaryCallable< ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsPagedResponse> listInstancePartitionOperationsPagedCallable; + private final UnaryCallable moveInstanceCallable; + private final OperationCallable + moveInstanceOperationCallable; private final BackgroundResource backgroundResources; private final GrpcOperationsStub operationsStub; @@ -626,6 +641,16 @@ protected GrpcInstanceAdminStub( return builder.build(); }) .build(); + GrpcCallSettings moveInstanceTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(moveInstanceMethodDescriptor) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); this.listInstanceConfigsCallable = callableFactory.createUnaryCallable( @@ -772,6 +797,15 @@ protected 
GrpcInstanceAdminStub( listInstancePartitionOperationsTransportSettings, settings.listInstancePartitionOperationsSettings(), clientContext); + this.moveInstanceCallable = + callableFactory.createUnaryCallable( + moveInstanceTransportSettings, settings.moveInstanceSettings(), clientContext); + this.moveInstanceOperationCallable = + callableFactory.createOperationCallable( + moveInstanceTransportSettings, + settings.moveInstanceOperationSettings(), + clientContext, + operationsStub); this.backgroundResources = new BackgroundResourceAggregation(clientContext.getBackgroundResources()); @@ -962,6 +996,17 @@ public UnaryCallable deleteInstancePartit return listInstancePartitionOperationsPagedCallable; } + @Override + public UnaryCallable moveInstanceCallable() { + return moveInstanceCallable; + } + + @Override + public OperationCallable + moveInstanceOperationCallable() { + return moveInstanceOperationCallable; + } + @Override public final void close() { try { diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/HttpJsonInstanceAdminStub.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/HttpJsonInstanceAdminStub.java index 9e530b899e0..82aaf253b98 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/HttpJsonInstanceAdminStub.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/HttpJsonInstanceAdminStub.java @@ -72,6 +72,9 @@ import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; import com.google.spanner.admin.instance.v1.ListInstancesRequest; import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceMetadata; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.MoveInstanceResponse; import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; import 
com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; @@ -96,8 +99,10 @@ public class HttpJsonInstanceAdminStub extends InstanceAdminStub { private static final TypeRegistry typeRegistry = TypeRegistry.newBuilder() + .add(MoveInstanceResponse.getDescriptor()) .add(InstanceConfig.getDescriptor()) .add(CreateInstancePartitionMetadata.getDescriptor()) + .add(MoveInstanceMetadata.getDescriptor()) .add(UpdateInstancePartitionMetadata.getDescriptor()) .add(Instance.getDescriptor()) .add(InstancePartition.getDescriptor()) @@ -888,6 +893,46 @@ public class HttpJsonInstanceAdminStub extends InstanceAdminStub { .build()) .build(); + private static final ApiMethodDescriptor + moveInstanceMethodDescriptor = + ApiMethodDescriptor.newBuilder() + .setFullMethodName("google.spanner.admin.instance.v1.InstanceAdmin/MoveInstance") + .setHttpMethod("POST") + .setType(ApiMethodDescriptor.MethodType.UNARY) + .setRequestFormatter( + ProtoMessageRequestFormatter.newBuilder() + .setPath( + "/v1/{name=projects/*/instances/*}:move", + request -> { + Map fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putPathParam(fields, "name", request.getName()); + return fields; + }) + .setQueryParamsExtractor( + request -> { + Map> fields = new HashMap<>(); + ProtoRestSerializer serializer = + ProtoRestSerializer.create(); + serializer.putQueryParam(fields, "$alt", "json;enum-encoding=int"); + return fields; + }) + .setRequestBodyExtractor( + request -> + ProtoRestSerializer.create() + .toBody("*", request.toBuilder().clearName().build(), true)) + .build()) + .setResponseParser( + ProtoMessageResponseParser.newBuilder() + .setDefaultInstance(Operation.getDefaultInstance()) + .setDefaultTypeRegistry(typeRegistry) + .build()) + .setOperationSnapshotFactory( + (MoveInstanceRequest request, Operation response) -> + HttpJsonOperationSnapshot.create(response)) + .build(); + 
private final UnaryCallable listInstanceConfigsCallable; private final UnaryCallable @@ -947,6 +992,9 @@ public class HttpJsonInstanceAdminStub extends InstanceAdminStub { private final UnaryCallable< ListInstancePartitionOperationsRequest, ListInstancePartitionOperationsPagedResponse> listInstancePartitionOperationsPagedCallable; + private final UnaryCallable moveInstanceCallable; + private final OperationCallable + moveInstanceOperationCallable; private final BackgroundResource backgroundResources; private final HttpJsonOperationsStub httpJsonOperationsStub; @@ -1279,6 +1327,17 @@ protected HttpJsonInstanceAdminStub( return builder.build(); }) .build(); + HttpJsonCallSettings moveInstanceTransportSettings = + HttpJsonCallSettings.newBuilder() + .setMethodDescriptor(moveInstanceMethodDescriptor) + .setTypeRegistry(typeRegistry) + .setParamsExtractor( + request -> { + RequestParamsBuilder builder = RequestParamsBuilder.create(); + builder.add("name", String.valueOf(request.getName())); + return builder.build(); + }) + .build(); this.listInstanceConfigsCallable = callableFactory.createUnaryCallable( @@ -1425,6 +1484,15 @@ protected HttpJsonInstanceAdminStub( listInstancePartitionOperationsTransportSettings, settings.listInstancePartitionOperationsSettings(), clientContext); + this.moveInstanceCallable = + callableFactory.createUnaryCallable( + moveInstanceTransportSettings, settings.moveInstanceSettings(), clientContext); + this.moveInstanceOperationCallable = + callableFactory.createOperationCallable( + moveInstanceTransportSettings, + settings.moveInstanceOperationSettings(), + clientContext, + httpJsonOperationsStub); this.backgroundResources = new BackgroundResourceAggregation(clientContext.getBackgroundResources()); @@ -1453,6 +1521,7 @@ public static List getMethodDescriptors() { methodDescriptors.add(deleteInstancePartitionMethodDescriptor); methodDescriptors.add(updateInstancePartitionMethodDescriptor); 
methodDescriptors.add(listInstancePartitionOperationsMethodDescriptor); + methodDescriptors.add(moveInstanceMethodDescriptor); return methodDescriptors; } @@ -1641,6 +1710,17 @@ public UnaryCallable deleteInstancePartit return listInstancePartitionOperationsPagedCallable; } + @Override + public UnaryCallable moveInstanceCallable() { + return moveInstanceCallable; + } + + @Override + public OperationCallable + moveInstanceOperationCallable() { + return moveInstanceOperationCallable; + } + @Override public final void close() { try { diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStub.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStub.java index 31d266a67ef..7bd8269c537 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStub.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStub.java @@ -58,6 +58,9 @@ import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; import com.google.spanner.admin.instance.v1.ListInstancesRequest; import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceMetadata; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.MoveInstanceResponse; import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; @@ -242,6 +245,15 @@ public UnaryCallable deleteInstancePartit "Not implemented: listInstancePartitionOperationsCallable()"); } + public OperationCallable + moveInstanceOperationCallable() { + throw new UnsupportedOperationException("Not implemented: moveInstanceOperationCallable()"); + } + + public UnaryCallable 
moveInstanceCallable() { + throw new UnsupportedOperationException("Not implemented: moveInstanceCallable()"); + } + @Override public abstract void close(); } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStubSettings.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStubSettings.java index 9a00b312c61..7cd7545db1d 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStubSettings.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/instance/v1/stub/InstanceAdminStubSettings.java @@ -25,6 +25,7 @@ import com.google.api.core.ApiFunction; import com.google.api.core.ApiFuture; import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; import com.google.api.gax.core.GaxProperties; import com.google.api.gax.core.GoogleCredentialsProvider; import com.google.api.gax.core.InstantiatingExecutorProvider; @@ -87,6 +88,9 @@ import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; import com.google.spanner.admin.instance.v1.ListInstancesRequest; import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceMetadata; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.MoveInstanceResponse; import com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata; import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; @@ -113,7 +117,9 @@ *

    The builder of this class is recursive, so contained classes are themselves builders. When * build() is called, the tree of builders is called to create the complete settings object. * - *

    For example, to set the total timeout of getInstanceConfig to 30 seconds: + *

    For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of getInstanceConfig: * *

    {@code
      * // This snippet has been automatically generated and should be regarded as a code template only.
    @@ -130,10 +136,47 @@
      *             .getInstanceConfigSettings()
      *             .getRetrySettings()
      *             .toBuilder()
    - *             .setTotalTimeout(Duration.ofSeconds(30))
    + *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
    + *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
    + *             .setMaxAttempts(5)
    + *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
    + *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
    + *             .setRetryDelayMultiplier(1.3)
    + *             .setRpcTimeoutMultiplier(1.5)
    + *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
      *             .build());
      * InstanceAdminStubSettings instanceAdminSettings = instanceAdminSettingsBuilder.build();
      * }
    + * + * Please refer to the [Client Side Retry + * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for + * additional support in setting retries. + * + *

    To configure the RetrySettings of a Long Running Operation method, create an + * OperationTimedPollAlgorithm object and update the RPC's polling algorithm. For example, to + * configure the RetrySettings for createInstanceConfig: + * + *

    {@code
    + * // This snippet has been automatically generated and should be regarded as a code template only.
    + * // It will require modifications to work:
    + * // - It may require correct/in-range values for request initialization.
    + * // - It may require specifying regional endpoints when creating the service client as shown in
    + * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
    + * InstanceAdminStubSettings.Builder instanceAdminSettingsBuilder =
    + *     InstanceAdminStubSettings.newBuilder();
    + * TimedRetryAlgorithm timedRetryAlgorithm =
    + *     OperationalTimedPollAlgorithm.create(
    + *         RetrySettings.newBuilder()
    + *             .setInitialRetryDelayDuration(Duration.ofMillis(500))
    + *             .setRetryDelayMultiplier(1.5)
    + *             .setMaxRetryDelay(Duration.ofMillis(5000))
    + *             .setTotalTimeoutDuration(Duration.ofHours(24))
    + *             .build());
    + * instanceAdminSettingsBuilder
    + *     .createClusterOperationSettings()
    + *     .setPollingAlgorithm(timedRetryAlgorithm)
    + *     .build();
    + * }
    */ @Generated("by gapic-generator-java") public class InstanceAdminStubSettings extends StubSettings { @@ -204,6 +247,10 @@ public class InstanceAdminStubSettings extends StubSettings listInstancePartitionOperationsSettings; + private final UnaryCallSettings moveInstanceSettings; + private final OperationCallSettings< + MoveInstanceRequest, MoveInstanceResponse, MoveInstanceMetadata> + moveInstanceOperationSettings; private static final PagedListDescriptor< ListInstanceConfigsRequest, ListInstanceConfigsResponse, InstanceConfig> @@ -239,9 +286,7 @@ public String extractNextToken(ListInstanceConfigsResponse payload) { @Override public Iterable extractResources(ListInstanceConfigsResponse payload) { - return payload.getInstanceConfigsList() == null - ? ImmutableList.of() - : payload.getInstanceConfigsList(); + return payload.getInstanceConfigsList(); } }; @@ -286,9 +331,7 @@ public String extractNextToken(ListInstanceConfigOperationsResponse payload) { @Override public Iterable extractResources( ListInstanceConfigOperationsResponse payload) { - return payload.getOperationsList() == null - ? ImmutableList.of() - : payload.getOperationsList(); + return payload.getOperationsList(); } }; @@ -322,9 +365,7 @@ public String extractNextToken(ListInstancesResponse payload) { @Override public Iterable extractResources(ListInstancesResponse payload) { - return payload.getInstancesList() == null - ? ImmutableList.of() - : payload.getInstancesList(); + return payload.getInstancesList(); } }; @@ -365,9 +406,7 @@ public String extractNextToken(ListInstancePartitionsResponse payload) { @Override public Iterable extractResources( ListInstancePartitionsResponse payload) { - return payload.getInstancePartitionsList() == null - ? 
ImmutableList.of() - : payload.getInstancePartitionsList(); + return payload.getInstancePartitionsList(); } }; @@ -414,9 +453,7 @@ public String extractNextToken(ListInstancePartitionOperationsResponse payload) @Override public Iterable extractResources( ListInstancePartitionOperationsResponse payload) { - return payload.getOperationsList() == null - ? ImmutableList.of() - : payload.getOperationsList(); + return payload.getOperationsList(); } }; @@ -710,6 +747,17 @@ public UnaryCallSettings getIamPolicySettings() { return listInstancePartitionOperationsSettings; } + /** Returns the object with the settings used for calls to moveInstance. */ + public UnaryCallSettings moveInstanceSettings() { + return moveInstanceSettings; + } + + /** Returns the object with the settings used for calls to moveInstance. */ + public OperationCallSettings + moveInstanceOperationSettings() { + return moveInstanceOperationSettings; + } + public InstanceAdminStub createStub() throws IOException { if (getTransportChannelProvider() .getTransportName() @@ -738,6 +786,7 @@ public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuild } /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") public static String getDefaultEndpoint() { return "spanner.googleapis.com:443"; } @@ -852,6 +901,8 @@ protected InstanceAdminStubSettings(Builder settingsBuilder) throws IOException settingsBuilder.updateInstancePartitionOperationSettings().build(); listInstancePartitionOperationsSettings = settingsBuilder.listInstancePartitionOperationsSettings().build(); + moveInstanceSettings = settingsBuilder.moveInstanceSettings().build(); + moveInstanceOperationSettings = settingsBuilder.moveInstanceOperationSettings().build(); } /** Builder for InstanceAdminStubSettings. 
*/ @@ -924,6 +975,10 @@ public static class Builder extends StubSettings.Builder listInstancePartitionOperationsSettings; + private final UnaryCallSettings.Builder moveInstanceSettings; + private final OperationCallSettings.Builder< + MoveInstanceRequest, MoveInstanceResponse, MoveInstanceMetadata> + moveInstanceOperationSettings; private static final ImmutableMap> RETRYABLE_CODE_DEFINITIONS; @@ -1033,6 +1088,8 @@ protected Builder(ClientContext clientContext) { updateInstancePartitionOperationSettings = OperationCallSettings.newBuilder(); listInstancePartitionOperationsSettings = PagedCallSettings.newBuilder(LIST_INSTANCE_PARTITION_OPERATIONS_PAGE_STR_FACT); + moveInstanceSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + moveInstanceOperationSettings = OperationCallSettings.newBuilder(); unaryMethodSettingsBuilders = ImmutableList.>of( @@ -1055,7 +1112,8 @@ protected Builder(ClientContext clientContext) { createInstancePartitionSettings, deleteInstancePartitionSettings, updateInstancePartitionSettings, - listInstancePartitionOperationsSettings); + listInstancePartitionOperationsSettings, + moveInstanceSettings); initDefaults(this); } @@ -1094,6 +1152,8 @@ protected Builder(InstanceAdminStubSettings settings) { settings.updateInstancePartitionOperationSettings.toBuilder(); listInstancePartitionOperationsSettings = settings.listInstancePartitionOperationsSettings.toBuilder(); + moveInstanceSettings = settings.moveInstanceSettings.toBuilder(); + moveInstanceOperationSettings = settings.moveInstanceOperationSettings.toBuilder(); unaryMethodSettingsBuilders = ImmutableList.>of( @@ -1116,7 +1176,8 @@ protected Builder(InstanceAdminStubSettings settings) { createInstancePartitionSettings, deleteInstancePartitionSettings, updateInstancePartitionSettings, - listInstancePartitionOperationsSettings); + listInstancePartitionOperationsSettings, + moveInstanceSettings); } private static Builder createDefault() { @@ -1244,6 +1305,11 @@ private static Builder 
initDefaults(Builder builder) { .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + builder + .moveInstanceSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + builder .createInstanceConfigOperationSettings() .setInitialCallSettings( @@ -1392,6 +1458,30 @@ private static Builder initDefaults(Builder builder) { .setTotalTimeout(Duration.ofMillis(300000L)) .build())); + builder + .moveInstanceOperationSettings() + .setInitialCallSettings( + UnaryCallSettings + .newUnaryCallSettingsBuilder() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")) + .build()) + .setResponseTransformer( + ProtoOperationTransformers.ResponseTransformer.create(MoveInstanceResponse.class)) + .setMetadataTransformer( + ProtoOperationTransformers.MetadataTransformer.create(MoveInstanceMetadata.class)) + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelay(Duration.ofMillis(45000L)) + .setInitialRpcTimeout(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ZERO) + .setTotalTimeout(Duration.ofMillis(300000L)) + .build())); + return builder; } @@ -1577,6 +1667,18 @@ public UnaryCallSettings.Builder getIamPolicySettin return listInstancePartitionOperationsSettings; } + /** Returns the builder for the settings used for calls to moveInstance. */ + public UnaryCallSettings.Builder moveInstanceSettings() { + return moveInstanceSettings; + } + + /** Returns the builder for the settings used for calls to moveInstance. 
*/ + public OperationCallSettings.Builder< + MoveInstanceRequest, MoveInstanceResponse, MoveInstanceMetadata> + moveInstanceOperationSettings() { + return moveInstanceOperationSettings; + } + @Override public InstanceAdminStubSettings build() throws IOException { return new InstanceAdminStubSettings(this); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractBaseUnitOfWork.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractBaseUnitOfWork.java index bd43daf9e57..9e431dbc0ba 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractBaseUnitOfWork.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractBaseUnitOfWork.java @@ -25,6 +25,7 @@ import com.google.cloud.spanner.BatchTransactionId; import com.google.cloud.spanner.Dialect; import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.OpenTelemetryContextKeys; import com.google.cloud.spanner.Options.QueryOption; import com.google.cloud.spanner.Options.RpcPriority; import com.google.cloud.spanner.Partition; @@ -45,6 +46,9 @@ import io.grpc.Context; import io.grpc.MethodDescriptor; import io.grpc.Status; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.context.Scope; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -63,10 +67,17 @@ /** Base for all {@link Connection}-based transactions and batches. 
*/ abstract class AbstractBaseUnitOfWork implements UnitOfWork { + static final String DB_STATEMENT = "db.statement"; + static final AttributeKey DB_STATEMENT_KEY = AttributeKey.stringKey(DB_STATEMENT); + static final AttributeKey> DB_STATEMENT_ARRAY_KEY = + AttributeKey.stringArrayKey(DB_STATEMENT); + private final StatementExecutor statementExecutor; private final StatementTimeout statementTimeout; protected final String transactionTag; + protected final boolean excludeTxnFromChangeStreams; protected final RpcPriority rpcPriority; + protected final Span span; /** Class for keeping track of the stacktrace of the caller of an async statement. */ static final class SpannerAsyncExecutionException extends RuntimeException { @@ -99,7 +110,10 @@ abstract static class Builder, T extends AbstractBaseUni private StatementExecutor statementExecutor; private StatementTimeout statementTimeout = new StatementTimeout(); private String transactionTag; + + private boolean excludeTxnFromChangeStreams; private RpcPriority rpcPriority; + private Span span; Builder() {} @@ -125,11 +139,21 @@ B setTransactionTag(@Nullable String tag) { return self(); } + B setExcludeTxnFromChangeStreams(boolean excludeTxnFromChangeStreams) { + this.excludeTxnFromChangeStreams = excludeTxnFromChangeStreams; + return self(); + } + B setRpcPriority(@Nullable RpcPriority rpcPriority) { this.rpcPriority = rpcPriority; return self(); } + B setSpan(@Nullable Span span) { + this.span = span; + return self(); + } + abstract T build(); } @@ -138,7 +162,25 @@ B setRpcPriority(@Nullable RpcPriority rpcPriority) { this.statementExecutor = builder.statementExecutor; this.statementTimeout = builder.statementTimeout; this.transactionTag = builder.transactionTag; + this.excludeTxnFromChangeStreams = builder.excludeTxnFromChangeStreams; this.rpcPriority = builder.rpcPriority; + this.span = Preconditions.checkNotNull(builder.span); + } + + @Override + public Span getSpan() { + return this.span; + } + + ApiFuture 
asyncEndUnitOfWorkSpan() { + return this.statementExecutor.submit(this::endUnitOfWorkSpan); + } + + private Void endUnitOfWorkSpan() { + if (this.span != null) { + this.span.end(); + } + return null; } /** @@ -318,37 +360,47 @@ public ApiCallContext configure( } }); } - ApiFuture f = statementExecutor.submit(context.wrap(callable)); - final SpannerAsyncExecutionException caller = - callType == CallType.ASYNC - ? new SpannerAsyncExecutionException(statement.getStatement()) - : null; - final ApiFuture future = - ApiFutures.catching( - f, - Throwable.class, - input -> { - if (caller != null) { - input.addSuppressed(caller); + // Register the name of the thread that called this method as the thread name that should be + // traced. + try (Scope ignore = + io.opentelemetry.context.Context.current() + .with(OpenTelemetryContextKeys.THREAD_NAME_KEY, Thread.currentThread().getName()) + .makeCurrent()) { + ApiFuture f = statementExecutor.submit(context.wrap(callable)); + final SpannerAsyncExecutionException caller = + callType == CallType.ASYNC + ? 
new SpannerAsyncExecutionException(statement.getStatement()) + : null; + final ApiFuture future = + ApiFutures.catching( + f, + Throwable.class, + input -> { + if (caller != null) { + input.addSuppressed(caller); + } + throw SpannerExceptionFactory.asSpannerException(input); + }, + MoreExecutors.directExecutor()); + synchronized (this) { + this.currentlyRunningStatementFuture = future; + } + future.addListener( + new Runnable() { + @Override + public void run() { + synchronized (this) { + if (currentlyRunningStatementFuture == future) { + currentlyRunningStatementFuture = null; + } } - throw SpannerExceptionFactory.asSpannerException(input); - }, - MoreExecutors.directExecutor()); - synchronized (this) { - this.currentlyRunningStatementFuture = future; - } - future.addListener( - new Runnable() { - @Override - public void run() { - synchronized (this) { - if (currentlyRunningStatementFuture == future) { - currentlyRunningStatementFuture = null; + if (isSingleUse()) { + endUnitOfWorkSpan(); } } - } - }, - MoreExecutors.directExecutor()); - return future; + }, + MoreExecutors.directExecutor()); + return future; + } } } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractMultiUseTransaction.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractMultiUseTransaction.java index da1ad2051c7..ca78c3e5aea 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractMultiUseTransaction.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/AbstractMultiUseTransaction.java @@ -29,6 +29,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.spanner.v1.SpannerGrpc; +import io.opentelemetry.context.Scope; import java.util.LinkedList; import java.util.Objects; import javax.annotation.Nonnull; @@ -94,6 +95,11 @@ public String toString() { super(builder); } + @Override + public boolean 
isSingleUse() { + return false; + } + @Override public Type getType() { return Type.TRANSACTION; @@ -124,16 +130,18 @@ public ApiFuture executeQueryAsync( final AnalyzeMode analyzeMode, final QueryOption... options) { Preconditions.checkArgument(statement.isQuery(), "Statement is not a query"); - checkOrCreateValidTransaction(statement, callType); - return executeStatementAsync( - callType, - statement, - () -> { - checkAborted(); - return DirectExecuteResultSet.ofResultSet( - internalExecuteQuery(statement, analyzeMode, options)); - }, - SpannerGrpc.getExecuteStreamingSqlMethod()); + try (Scope ignore = span.makeCurrent()) { + checkOrCreateValidTransaction(statement, callType); + return executeStatementAsync( + callType, + statement, + () -> { + checkAborted(); + return DirectExecuteResultSet.ofResultSet( + internalExecuteQuery(statement, analyzeMode, options)); + }, + SpannerGrpc.getExecuteStreamingSqlMethod()); + } } ResultSet internalExecuteQuery( diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementValueConverters.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementValueConverters.java index 6be277777a6..fd078c99b6a 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementValueConverters.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ClientSideStatementValueConverters.java @@ -16,6 +16,11 @@ package com.google.cloud.spanner.connection; +import static com.google.cloud.spanner.connection.ReadOnlyStalenessUtil.parseTimeUnit; +import static com.google.cloud.spanner.connection.ReadOnlyStalenessUtil.toChronoUnit; + +import com.google.api.gax.core.CredentialsProvider; +import com.google.cloud.spanner.Dialect; import com.google.cloud.spanner.ErrorCode; import com.google.cloud.spanner.Options.RpcPriority; import com.google.cloud.spanner.SpannerException; @@ -26,15 +31,17 @@ import 
com.google.cloud.spanner.connection.PgTransactionMode.IsolationLevel; import com.google.common.base.Function; import com.google.common.base.Preconditions; -import com.google.protobuf.Duration; -import com.google.protobuf.util.Durations; +import com.google.common.base.Strings; import com.google.spanner.v1.DirectedReadOptions; -import com.google.spanner.v1.RequestOptions.Priority; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import java.util.Base64; import java.util.EnumSet; import java.util.HashMap; import java.util.Locale; import java.util.Map; -import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -70,7 +77,11 @@ private E get(String value) { /** Converter from string to {@link Boolean} */ static class BooleanConverter implements ClientSideStatementValueConverter { + static final BooleanConverter INSTANCE = new BooleanConverter(); + + private BooleanConverter() {} + /** Constructor that is needed for reflection. */ public BooleanConverter(String allowedValues) {} @Override @@ -139,7 +150,11 @@ public Boolean convert(String value) { /** Converter from string to a non-negative integer. */ static class NonNegativeIntegerConverter implements ClientSideStatementValueConverter { + static final NonNegativeIntegerConverter INSTANCE = new NonNegativeIntegerConverter(); + private NonNegativeIntegerConverter() {} + + /** Constructor needed for reflection. */ public NonNegativeIntegerConverter(String allowedValues) {} @Override @@ -164,9 +179,19 @@ public Integer convert(String value) { /** Converter from string to {@link Duration}. 
*/ static class DurationConverter implements ClientSideStatementValueConverter { + static final DurationConverter INSTANCE = + new DurationConverter("('(\\d{1,19})(s|ms|us|ns)'|\\d{1,19}|NULL)"); + + private final String resetValue; + private final Pattern allowedValues; public DurationConverter(String allowedValues) { + this("NULL", allowedValues); + } + + DurationConverter(String resetValue, String allowedValues) { + this.resetValue = Preconditions.checkNotNull(resetValue); // Remove the parentheses from the beginning and end. this.allowedValues = Pattern.compile( @@ -182,17 +207,25 @@ public Class getParameterClass() { public Duration convert(String value) { Matcher matcher = allowedValues.matcher(value); if (matcher.find()) { - if (matcher.group(0).equalsIgnoreCase("null")) { - return Durations.fromNanos(0L); + if (value.trim().equalsIgnoreCase(resetValue)) { + return Duration.ZERO; } else { - Duration duration = - ReadOnlyStalenessUtil.createDuration( - Long.parseLong(matcher.group(1)), - ReadOnlyStalenessUtil.parseTimeUnit(matcher.group(2))); - if (duration.getSeconds() == 0L && duration.getNanos() == 0) { + try { + Duration duration; + if (matcher.group(1) != null && matcher.group(2) != null) { + ChronoUnit unit = toChronoUnit(parseTimeUnit(matcher.group(2))); + duration = Duration.of(Long.parseLong(matcher.group(1)), unit); + } else { + duration = Duration.ofMillis(Long.parseLong(value.trim())); + } + if (duration.isZero()) { + return null; + } + return duration; + } catch (NumberFormatException exception) { + // Converters should return null for invalid values. return null; } - return duration; } } return null; @@ -200,50 +233,19 @@ public Duration convert(String value) { } /** Converter from string to {@link Duration}. 
*/ - static class PgDurationConverter implements ClientSideStatementValueConverter { - private final Pattern allowedValues; - + static class PgDurationConverter extends DurationConverter { public PgDurationConverter(String allowedValues) { - // Remove the parentheses from the beginning and end. - this.allowedValues = - Pattern.compile( - "(?is)\\A" + allowedValues.substring(1, allowedValues.length() - 1) + "\\z"); - } - - @Override - public Class getParameterClass() { - return Duration.class; - } - - @Override - public Duration convert(String value) { - Matcher matcher = allowedValues.matcher(value); - if (matcher.find()) { - Duration duration; - if (matcher.group(0).equalsIgnoreCase("default")) { - return Durations.fromNanos(0L); - } else if (matcher.group(2) == null) { - duration = - ReadOnlyStalenessUtil.createDuration( - Long.parseLong(matcher.group(0)), TimeUnit.MILLISECONDS); - } else { - duration = - ReadOnlyStalenessUtil.createDuration( - Long.parseLong(matcher.group(1)), - ReadOnlyStalenessUtil.parseTimeUnit(matcher.group(2))); - } - if (duration.getSeconds() == 0L && duration.getNanos() == 0) { - return null; - } - return duration; - } - return null; + super("DEFAULT", allowedValues); } } /** Converter from string to possible values for read only staleness ({@link TimestampBound}). 
*/ static class ReadOnlyStalenessConverter implements ClientSideStatementValueConverter { + static final ReadOnlyStalenessConverter INSTANCE = + new ReadOnlyStalenessConverter( + "'((STRONG)|(MIN_READ_TIMESTAMP)[\\t ]+((\\d{4})-(\\d{2})-(\\d{2})([Tt](\\d{2}):(\\d{2}):(\\d{2})(\\.\\d{1,9})?)([Zz]|([+-])(\\d{2}):(\\d{2})))|(READ_TIMESTAMP)[\\t ]+((\\d{4})-(\\d{2})-(\\d{2})([Tt](\\d{2}):(\\d{2}):(\\d{2})(\\.\\d{1,9})?)([Zz]|([+-])(\\d{2}):(\\d{2})))|(MAX_STALENESS)[\\t ]+((\\d{1,19})(s|ms|us|ns))|(EXACT_STALENESS)[\\t ]+((\\d{1,19})(s|ms|us|ns)))'"); + private final Pattern allowedValues; private final CaseInsensitiveEnumMap values = new CaseInsensitiveEnumMap<>(Mode.class); @@ -287,7 +289,7 @@ public TimestampBound convert(String value) { try { return TimestampBound.ofExactStaleness( Long.parseLong(matcher.group(groupIndex + 2)), - ReadOnlyStalenessUtil.parseTimeUnit(matcher.group(groupIndex + 3))); + parseTimeUnit(matcher.group(groupIndex + 3))); } catch (IllegalArgumentException e) { throw SpannerExceptionFactory.newSpannerException( ErrorCode.INVALID_ARGUMENT, e.getMessage()); @@ -296,7 +298,7 @@ public TimestampBound convert(String value) { try { return TimestampBound.ofMaxStaleness( Long.parseLong(matcher.group(groupIndex + 2)), - ReadOnlyStalenessUtil.parseTimeUnit(matcher.group(groupIndex + 3))); + parseTimeUnit(matcher.group(groupIndex + 3))); } catch (IllegalArgumentException e) { throw SpannerExceptionFactory.newSpannerException( ErrorCode.INVALID_ARGUMENT, e.getMessage()); @@ -354,9 +356,14 @@ public DirectedReadOptions convert(String value) { /** Converter for converting strings to {@link AutocommitDmlMode} values. 
*/ static class AutocommitDmlModeConverter implements ClientSideStatementValueConverter { + static final AutocommitDmlModeConverter INSTANCE = new AutocommitDmlModeConverter(); + private final CaseInsensitiveEnumMap values = new CaseInsensitiveEnumMap<>(AutocommitDmlMode.class); + private AutocommitDmlModeConverter() {} + + /** Constructor needed for reflection. */ public AutocommitDmlModeConverter(String allowedValues) {} @Override @@ -370,7 +377,35 @@ public AutocommitDmlMode convert(String value) { } } + static class ConnectionStateTypeConverter + implements ClientSideStatementValueConverter { + static final ConnectionStateTypeConverter INSTANCE = new ConnectionStateTypeConverter(); + + private final CaseInsensitiveEnumMap values = + new CaseInsensitiveEnumMap<>(ConnectionState.Type.class); + + private ConnectionStateTypeConverter() {} + + /** Constructor that is needed for reflection. */ + public ConnectionStateTypeConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return ConnectionState.Type.class; + } + + @Override + public ConnectionState.Type convert(String value) { + return values.get(value); + } + } + static class StringValueConverter implements ClientSideStatementValueConverter { + static final StringValueConverter INSTANCE = new StringValueConverter(); + + private StringValueConverter() {} + + /** Constructor needed for reflection. */ public StringValueConverter(String allowedValues) {} @Override @@ -497,9 +532,11 @@ public PgTransactionMode convert(String value) { } /** Converter for converting strings to {@link RpcPriority} values. 
*/ - static class RpcPriorityConverter implements ClientSideStatementValueConverter { - private final CaseInsensitiveEnumMap values = - new CaseInsensitiveEnumMap<>(Priority.class); + static class RpcPriorityConverter implements ClientSideStatementValueConverter { + static final RpcPriorityConverter INSTANCE = new RpcPriorityConverter("(HIGH|MEDIUM|LOW|NULL)"); + + private final CaseInsensitiveEnumMap values = + new CaseInsensitiveEnumMap<>(RpcPriority.class); private final Pattern allowedValues; public RpcPriorityConverter(String allowedValues) { @@ -510,28 +547,33 @@ public RpcPriorityConverter(String allowedValues) { } @Override - public Class getParameterClass() { - return Priority.class; + public Class getParameterClass() { + return RpcPriority.class; } @Override - public Priority convert(String value) { + public RpcPriority convert(String value) { Matcher matcher = allowedValues.matcher(value); if (matcher.find()) { if (matcher.group(0).equalsIgnoreCase("null")) { - return Priority.PRIORITY_UNSPECIFIED; + return RpcPriority.UNSPECIFIED; } } - return values.get("PRIORITY_" + value); + return values.get(value); } } /** Converter for converting strings to {@link SavepointSupport} values. */ static class SavepointSupportConverter implements ClientSideStatementValueConverter { + static final SavepointSupportConverter INSTANCE = new SavepointSupportConverter(); + private final CaseInsensitiveEnumMap values = new CaseInsensitiveEnumMap<>(SavepointSupport.class); + private SavepointSupportConverter() {} + + /** Constructor needed for reflection. */ public SavepointSupportConverter(String allowedValues) {} @Override @@ -545,6 +587,30 @@ public SavepointSupport convert(String value) { } } + /** Converter for converting strings to {@link DdlInTransactionMode} values. 
*/ + static class DdlInTransactionModeConverter + implements ClientSideStatementValueConverter { + static final DdlInTransactionModeConverter INSTANCE = new DdlInTransactionModeConverter(); + + private final CaseInsensitiveEnumMap values = + new CaseInsensitiveEnumMap<>(DdlInTransactionMode.class); + + private DdlInTransactionModeConverter() {} + + /** Constructor needed for reflection. */ + public DdlInTransactionModeConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return DdlInTransactionMode.class; + } + + @Override + public DdlInTransactionMode convert(String value) { + return values.get(value); + } + } + static class ExplainCommandConverter implements ClientSideStatementValueConverter { @Override public Class getParameterClass() { @@ -563,4 +629,113 @@ public String convert(String value) { return value.substring(7).trim(); } } + + /** Converter for converting Base64 encoded string to byte[] */ + static class ProtoDescriptorsConverter implements ClientSideStatementValueConverter { + + public ProtoDescriptorsConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return byte[].class; + } + + @Override + public byte[] convert(String value) { + if (value == null || value.length() == 0 || value.equalsIgnoreCase("null")) { + return null; + } + try { + return Base64.getDecoder().decode(value); + } catch (IllegalArgumentException e) { + return null; + } + } + } + + /** Converter for converting String that take in file path as input to String */ + static class ProtoDescriptorsFileConverter implements ClientSideStatementValueConverter { + + public ProtoDescriptorsFileConverter(String allowedValues) {} + + @Override + public Class getParameterClass() { + return String.class; + } + + @Override + public String convert(String filePath) { + if (Strings.isNullOrEmpty(filePath)) { + return null; + } + return filePath; + } + } + + static class CredentialsProviderConverter + implements 
ClientSideStatementValueConverter { + static final CredentialsProviderConverter INSTANCE = new CredentialsProviderConverter(); + + private CredentialsProviderConverter() {} + + @Override + public Class getParameterClass() { + return CredentialsProvider.class; + } + + @Override + public CredentialsProvider convert(String credentialsProviderName) { + if (!Strings.isNullOrEmpty(credentialsProviderName)) { + try { + Class clazz = + (Class) Class.forName(credentialsProviderName); + Constructor constructor = clazz.getDeclaredConstructor(); + return constructor.newInstance(); + } catch (ClassNotFoundException classNotFoundException) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Unknown or invalid CredentialsProvider class name: " + credentialsProviderName, + classNotFoundException); + } catch (NoSuchMethodException noSuchMethodException) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Credentials provider " + + credentialsProviderName + + " does not have a public no-arg constructor.", + noSuchMethodException); + } catch (InvocationTargetException + | InstantiationException + | IllegalAccessException exception) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + "Failed to create an instance of " + + credentialsProviderName + + ": " + + exception.getMessage(), + exception); + } + } + return null; + } + } + + /** Converter for converting strings to {@link Dialect} values. 
*/ + static class DialectConverter implements ClientSideStatementValueConverter { + static final DialectConverter INSTANCE = new DialectConverter(); + + private final CaseInsensitiveEnumMap values = + new CaseInsensitiveEnumMap<>(Dialect.class); + + private DialectConverter() {} + + @Override + public Class getParameterClass() { + return Dialect.class; + } + + @Override + public Dialect convert(String value) { + return values.get(value); + } + } } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/Connection.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/Connection.java index e1a4415ea49..b2d4caa9df8 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/Connection.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/Connection.java @@ -47,6 +47,7 @@ import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import javax.annotation.Nonnull; /** * Internal connection API for Google Cloud Spanner. This interface may introduce breaking changes @@ -171,6 +172,19 @@ public interface Connection extends AutoCloseable { /** @return true if this connection has been closed. */ boolean isClosed(); + /** + * Resets the state of this connection to the default state that it had when it was first created. + * Calling this method after a transaction has started (that is; after a statement has been + * executed in the transaction), does not change the active transaction. If for example a + * transaction has been started with a transaction tag, the transaction tag for the active + * transaction is not reset. + * + *

    You can use this method to reset the state of the connection before returning a connection + * to a connection pool, and/or before using a connection that was retrieved from a connection + * pool. + */ + void reset(); + /** * Sets autocommit on/off for this {@link Connection}. Connections in autocommit mode will apply * any changes to the database directly without waiting for an explicit commit. DDL- and DML @@ -387,6 +401,41 @@ default String getStatementTag() { throw new UnsupportedOperationException(); } + /** + * Sets whether the next transaction should be excluded from all change streams with the DDL + * option `allow_txn_exclusion=true` + */ + default void setExcludeTxnFromChangeStreams(boolean excludeTxnFromChangeStreams) { + throw new UnsupportedOperationException(); + } + + /** + * Returns true if the next transaction should be excluded from all change streams with the DDL + * option `allow_txn_exclusion=true` + */ + default boolean isExcludeTxnFromChangeStreams() { + throw new UnsupportedOperationException(); + } + + /** + * Sets the proto descriptors to use for the next DDL statement (single or batch) that will be + * executed. The proto descriptor is automatically cleared after the statement is executed. + * + * @param protoDescriptors The proto descriptors to use with the next DDL statement (single or + * batch) that will be executed on this connection. + */ + default void setProtoDescriptors(@Nonnull byte[] protoDescriptors) { + throw new UnsupportedOperationException(); + } + + /** + * @return The proto descriptor that will be used with the next DDL statement (single or batch) + * that is executed on this connection. + */ + default byte[] getProtoDescriptors() { + throw new UnsupportedOperationException(); + } + /** * @return true if this connection will automatically retry read/write transactions * that abort. 
This method may only be called when the connection is in read/write @@ -617,6 +666,25 @@ default boolean isDelayTransactionStartUntilFirstWrite() { throw new UnsupportedOperationException("Unimplemented"); } + /** + * Sets whether this connection should keep read/write transactions alive by executing a SELECT 1 + * once every 10 seconds during inactive read/write transactions. + * + *

    NOTE: This will keep read/write transactions alive and hold on to locks until it is + * explicitly committed or rolled back. + */ + default void setKeepTransactionAlive(boolean keepTransactionAlive) { + throw new UnsupportedOperationException("Unimplemented"); + } + + /** + * @return true if this connection keeps read/write transactions alive by executing a SELECT 1 + * once every 10 seconds during inactive read/write transactions. + */ + default boolean isKeepTransactionAlive() { + throw new UnsupportedOperationException("Unimplemented"); + } + /** * Commits the current transaction of this connection. All mutations that have been buffered * during the current transaction will be written to the database. diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionImpl.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionImpl.java index 70e789eb580..407742678d9 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionImpl.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionImpl.java @@ -17,16 +17,40 @@ package com.google.cloud.spanner.connection; import static com.google.cloud.spanner.SpannerApiFutures.get; +import static com.google.cloud.spanner.connection.ConnectionOptions.isEnableTransactionalConnectionStateForPostgreSQL; import static com.google.cloud.spanner.connection.ConnectionPreconditions.checkValidIdentifier; +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTOCOMMIT; +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTOCOMMIT_DML_MODE; +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTO_PARTITION_MODE; +import static com.google.cloud.spanner.connection.ConnectionProperties.DATA_BOOST_ENABLED; +import static com.google.cloud.spanner.connection.ConnectionProperties.DDL_IN_TRANSACTION_MODE; +import static 
com.google.cloud.spanner.connection.ConnectionProperties.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE; +import static com.google.cloud.spanner.connection.ConnectionProperties.DIRECTED_READ; +import static com.google.cloud.spanner.connection.ConnectionProperties.KEEP_TRANSACTION_ALIVE; +import static com.google.cloud.spanner.connection.ConnectionProperties.MAX_COMMIT_DELAY; +import static com.google.cloud.spanner.connection.ConnectionProperties.MAX_PARTITIONED_PARALLELISM; +import static com.google.cloud.spanner.connection.ConnectionProperties.MAX_PARTITIONS; +import static com.google.cloud.spanner.connection.ConnectionProperties.OPTIMIZER_STATISTICS_PACKAGE; +import static com.google.cloud.spanner.connection.ConnectionProperties.OPTIMIZER_VERSION; +import static com.google.cloud.spanner.connection.ConnectionProperties.READONLY; +import static com.google.cloud.spanner.connection.ConnectionProperties.READ_ONLY_STALENESS; +import static com.google.cloud.spanner.connection.ConnectionProperties.RETRY_ABORTS_INTERNALLY; +import static com.google.cloud.spanner.connection.ConnectionProperties.RETURN_COMMIT_STATS; +import static com.google.cloud.spanner.connection.ConnectionProperties.RPC_PRIORITY; +import static com.google.cloud.spanner.connection.ConnectionProperties.SAVEPOINT_SUPPORT; +import static com.google.cloud.spanner.connection.ConnectionProperties.TRACING_PREFIX; import com.google.api.core.ApiFuture; import com.google.api.core.ApiFutures; +import com.google.api.gax.core.GaxProperties; +import com.google.cloud.ByteArray; import com.google.cloud.Timestamp; import com.google.cloud.spanner.AsyncResultSet; import com.google.cloud.spanner.BatchClient; import com.google.cloud.spanner.BatchReadOnlyTransaction; import com.google.cloud.spanner.CommitResponse; import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; import com.google.cloud.spanner.Dialect; import com.google.cloud.spanner.ErrorCode; import com.google.cloud.spanner.Mutation; 
@@ -47,16 +71,28 @@ import com.google.cloud.spanner.TimestampBound.Mode; import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; +import com.google.cloud.spanner.connection.ConnectionProperty.Context; +import com.google.cloud.spanner.connection.ConnectionState.Type; import com.google.cloud.spanner.connection.StatementExecutor.StatementTimeout; import com.google.cloud.spanner.connection.StatementResult.ResultType; import com.google.cloud.spanner.connection.UnitOfWork.CallType; +import com.google.cloud.spanner.connection.UnitOfWork.EndTransactionCallback; import com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import com.google.common.base.Suppliers; import com.google.common.util.concurrent.MoreExecutors; import com.google.spanner.v1.DirectedReadOptions; import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; import com.google.spanner.v1.ResultSetStats; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.Tracer; +import java.io.File; +import java.io.FileInputStream; +import java.io.InputStream; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -66,17 +102,26 @@ import java.util.List; import java.util.Set; import java.util.Stack; +import java.util.UUID; import java.util.concurrent.ExecutionException; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; +import javax.annotation.Nonnull; import javax.annotation.Nullable; import org.threeten.bp.Instant; /** Implementation for 
{@link Connection}, the generic Spanner connection API (not JDBC). */ class ConnectionImpl implements Connection { + private static final String INSTRUMENTATION_SCOPE = "cloud.google.com/java"; + private static final String SINGLE_USE_TRANSACTION = "SingleUseTransaction"; + private static final String READ_ONLY_TRANSACTION = "ReadOnlyTransaction"; + private static final String READ_WRITE_TRANSACTION = "ReadWriteTransaction"; + private static final String DDL_BATCH = "DdlBatch"; + private static final String DDL_STATEMENT = "DdlStatement"; + private static final String CLOSED_ERROR_MSG = "This connection is closed"; private static final String ONLY_ALLOWED_IN_AUTOCOMMIT = "This method may only be called while in autocommit mode"; @@ -95,7 +140,7 @@ private LeakedConnectionException() { } } - private volatile LeakedConnectionException leakedException;; + private volatile LeakedConnectionException leakedException; private final SpannerPool spannerPool; private AbstractStatementParser statementParser; /** @@ -187,13 +232,12 @@ static UnitOfWorkType of(TransactionMode transactionMode) { private boolean closed = false; private final Spanner spanner; + private final Tracer tracer; + private final Attributes openTelemetryAttributes; private final DdlClient ddlClient; private final DatabaseClient dbClient; private final BatchClient batchClient; - private boolean autocommit; - private boolean readOnly; - private boolean returnCommitStats; - private boolean delayTransactionStartUntilFirstWrite; + private final ConnectionState connectionState; private UnitOfWork currentUnitOfWork = null; /** @@ -210,45 +254,15 @@ static UnitOfWorkType of(TransactionMode transactionMode) { private BatchMode batchMode; private UnitOfWorkType unitOfWorkType; private final Stack transactionStack = new Stack<>(); - private boolean retryAbortsInternally; private final List transactionRetryListeners = new ArrayList<>(); - private AutocommitDmlMode autocommitDmlMode = AutocommitDmlMode.TRANSACTIONAL; 
- private TimestampBound readOnlyStaleness = TimestampBound.strong(); - /** - * autoPartitionMode will force this connection to execute all queries as partitioned queries. If - * a query cannot be executed as a partitioned query, for example if it is not partitionable, then - * the query will fail. This mode is intended for integrations with frameworks that should always - * use partitioned queries, and that do not support executing custom SQL statements. This setting - * can be used in combination with the dataBoostEnabled flag to force all queries to use data - * boost. - */ - private boolean autoPartitionMode; - /** - * dataBoostEnabled=true will cause all partitionedQueries to use data boost. All other queries - * and other statements ignore this flag. - */ - private boolean dataBoostEnabled; - /** - * maxPartitions determines the maximum number of partitions that will be used for partitioned - * queries. All other statements ignore this variable. - */ - private int maxPartitions; - /** - * maxPartitionedParallelism determines the maximum number of threads that will be used to execute - * partitions in parallel when executing a partitioned query on this connection. - */ - private int maxPartitionedParallelism; - - private DirectedReadOptions directedReadOptions = null; - private QueryOptions queryOptions = QueryOptions.getDefaultInstance(); - private RpcPriority rpcPriority = null; - private SavepointSupport savepointSupport = SavepointSupport.FAIL_AFTER_ROLLBACK; - private DdlInTransactionMode ddlInTransactionMode; + // The following properties are not 'normal' connection properties, but transient properties that + // are automatically reset after executing a transaction or statement. 
private String transactionTag; private String statementTag; - - private Duration maxCommitDelay; + private boolean excludeTxnFromChangeStreams; + private byte[] protoDescriptors; + private String protoDescriptorsFilePath; /** Create a connection and register it in the SpannerPool. */ ConnectionImpl(ConnectionOptions options) { @@ -261,26 +275,32 @@ static UnitOfWorkType of(TransactionMode transactionMode) { this.spannerPool = SpannerPool.INSTANCE; this.options = options; this.spanner = spannerPool.getSpanner(options, this); + this.tracer = + spanner + .getOptions() + .getOpenTelemetry() + .getTracer( + INSTRUMENTATION_SCOPE, + GaxProperties.getLibraryVersion(spanner.getOptions().getClass())); + this.openTelemetryAttributes = createOpenTelemetryAttributes(options.getDatabaseId()); if (options.isAutoConfigEmulator()) { EmulatorUtil.maybeCreateInstanceAndDatabase( spanner, options.getDatabaseId(), options.getDialect()); } this.dbClient = spanner.getDatabaseClient(options.getDatabaseId()); this.batchClient = spanner.getBatchClient(options.getDatabaseId()); - this.retryAbortsInternally = options.isRetryAbortsInternally(); - this.readOnly = options.isReadOnly(); - this.autocommit = options.isAutocommit(); - this.queryOptions = this.queryOptions.toBuilder().mergeFrom(options.getQueryOptions()).build(); - this.rpcPriority = options.getRPCPriority(); - this.ddlInTransactionMode = options.getDdlInTransactionMode(); - this.returnCommitStats = options.isReturnCommitStats(); - this.delayTransactionStartUntilFirstWrite = options.isDelayTransactionStartUntilFirstWrite(); - this.dataBoostEnabled = options.isDataBoostEnabled(); - this.autoPartitionMode = options.isAutoPartitionMode(); - this.maxPartitions = options.getMaxPartitions(); - this.maxPartitionedParallelism = options.getMaxPartitionedParallelism(); - this.maxCommitDelay = options.getMaxCommitDelay(); this.ddlClient = createDdlClient(); + this.connectionState = + new ConnectionState( + 
options.getInitialConnectionPropertyValues(), + Suppliers.memoize( + () -> + isEnableTransactionalConnectionStateForPostgreSQL() + && getDialect() == Dialect.POSTGRESQL + ? Type.TRANSACTIONAL + : Type.NON_TRANSACTIONAL)); + + // (Re)set the state of the connection to the default. setDefaultTransactionOptions(); } @@ -298,11 +318,16 @@ static UnitOfWorkType of(TransactionMode transactionMode) { new StatementExecutor(options.isUseVirtualThreads(), Collections.emptyList()); this.spannerPool = Preconditions.checkNotNull(spannerPool); this.options = Preconditions.checkNotNull(options); - this.ddlInTransactionMode = options.getDdlInTransactionMode(); this.spanner = spannerPool.getSpanner(options, this); + this.tracer = OpenTelemetry.noop().getTracer(INSTRUMENTATION_SCOPE); + this.openTelemetryAttributes = Attributes.empty(); this.ddlClient = Preconditions.checkNotNull(ddlClient); this.dbClient = Preconditions.checkNotNull(dbClient); this.batchClient = Preconditions.checkNotNull(batchClient); + this.connectionState = + new ConnectionState( + options.getInitialConnectionPropertyValues(), + Suppliers.ofInstance(Type.NON_TRANSACTIONAL)); setReadOnly(options.isReadOnly()); setAutocommit(options.isAutocommit()); setReturnCommitStats(options.isReturnCommitStats()); @@ -317,6 +342,7 @@ public Spanner getSpanner() { private DdlClient createDdlClient() { return DdlClient.newBuilder() .setDatabaseAdminClient(spanner.getDatabaseAdminClient()) + .setProjectId(options.getProjectId()) .setInstanceId(options.getInstanceId()) .setDatabaseName(options.getDatabaseName()) .build(); @@ -329,6 +355,25 @@ private AbstractStatementParser getStatementParser() { return this.statementParser; } + Attributes getOpenTelemetryAttributes() { + return this.openTelemetryAttributes; + } + + @VisibleForTesting + static Attributes createOpenTelemetryAttributes(DatabaseId databaseId) { + AttributesBuilder attributesBuilder = Attributes.builder(); + attributesBuilder.put("connection_id", 
UUID.randomUUID().toString()); + attributesBuilder.put("database", databaseId.getDatabase()); + attributesBuilder.put("instance_id", databaseId.getInstanceId().getInstance()); + attributesBuilder.put("project_id", databaseId.getInstanceId().getProject()); + return attributesBuilder.build(); + } + + @VisibleForTesting + ConnectionState.Type getConnectionStateType() { + return this.connectionState.getType(); + } + @Override public void close() { try { @@ -377,6 +422,53 @@ public ApiFuture closeAsync() { return ApiFutures.immediateFuture(null); } + private Context getCurrentContext() { + return Context.USER; + } + + /** + * Resets the state of this connection to the default state in the {@link ConnectionOptions} of + * this connection. + */ + public void reset() { + reset(getCurrentContext(), isInTransaction()); + } + + private void reset(Context context, boolean inTransaction) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + + // TODO: Replace all of these with a resetAll in ConnectionState. 
+ this.connectionState.resetValue(RETRY_ABORTS_INTERNALLY, context, inTransaction); + this.connectionState.resetValue(AUTOCOMMIT, context, inTransaction); + this.connectionState.resetValue(READONLY, context, inTransaction); + this.connectionState.resetValue(READ_ONLY_STALENESS, context, inTransaction); + this.connectionState.resetValue(OPTIMIZER_VERSION, context, inTransaction); + this.connectionState.resetValue(OPTIMIZER_STATISTICS_PACKAGE, context, inTransaction); + this.connectionState.resetValue(RPC_PRIORITY, context, inTransaction); + this.connectionState.resetValue(DDL_IN_TRANSACTION_MODE, context, inTransaction); + this.connectionState.resetValue(RETURN_COMMIT_STATS, context, inTransaction); + this.connectionState.resetValue( + DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE, context, inTransaction); + this.connectionState.resetValue(KEEP_TRANSACTION_ALIVE, context, inTransaction); + this.connectionState.resetValue(AUTO_PARTITION_MODE, context, inTransaction); + this.connectionState.resetValue(DATA_BOOST_ENABLED, context, inTransaction); + this.connectionState.resetValue(MAX_PARTITIONS, context, inTransaction); + this.connectionState.resetValue(MAX_PARTITIONED_PARALLELISM, context, inTransaction); + this.connectionState.resetValue(MAX_COMMIT_DELAY, context, inTransaction); + + this.connectionState.resetValue(AUTOCOMMIT_DML_MODE, context, inTransaction); + this.statementTag = null; + this.statementTimeout = new StatementExecutor.StatementTimeout(); + this.connectionState.resetValue(DIRECTED_READ, context, inTransaction); + this.connectionState.resetValue(SAVEPOINT_SUPPORT, context, inTransaction); + this.protoDescriptors = null; + this.protoDescriptorsFilePath = null; + + if (!isTransactionStarted()) { + setDefaultTransactionOptions(); + } + } + /** Get the current unit-of-work type of this connection. 
*/ UnitOfWorkType getUnitOfWorkType() { return unitOfWorkType; @@ -412,6 +504,39 @@ public boolean isClosed() { return closed; } + private T getConnectionPropertyValue( + com.google.cloud.spanner.connection.ConnectionProperty property) { + return this.connectionState.getValue(property).getValue(); + } + + private void setConnectionPropertyValue(ConnectionProperty property, T value) { + setConnectionPropertyValue(property, value, /* local = */ false); + } + + private void setConnectionPropertyValue( + ConnectionProperty property, T value, boolean local) { + if (local) { + setLocalConnectionPropertyValue(property, value); + } else { + this.connectionState.setValue(property, value, getCurrentContext(), isInTransaction()); + } + } + + /** + * Sets a connection property value only for the duration of the current transaction. The effects + * of this will be undone once the transaction ends, regardless whether the transaction is + * committed or rolled back. 'Local' properties are supported for both {@link + * com.google.cloud.spanner.connection.ConnectionState.Type#TRANSACTIONAL} and {@link + * com.google.cloud.spanner.connection.ConnectionState.Type#NON_TRANSACTIONAL} connection states. + * + *

    NOTE: This feature is not yet exposed in the public API. + */ + private void setLocalConnectionPropertyValue(ConnectionProperty property, T value) { + ConnectionPreconditions.checkState( + isInTransaction(), "SET LOCAL statements are only supported in transactions"); + this.connectionState.setLocalValue(property, value); + } + @Override public void setAutocommit(boolean autocommit) { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); @@ -426,14 +551,24 @@ public void setAutocommit(boolean autocommit) { "Cannot set autocommit while in a temporary transaction"); ConnectionPreconditions.checkState( !transactionBeginMarked, "Cannot set autocommit when a transaction has begun"); - this.autocommit = autocommit; + setConnectionPropertyValue(AUTOCOMMIT, autocommit); + if (autocommit) { + // Commit the current transaction state if we went from autocommit=false to autocommit=true. + // Otherwise, we get the strange situation that autocommit=true cannot be committed, as we no + // longer have a transaction. Note that all the above state checks essentially mean that + // autocommit can only be set before a transaction has actually started, and not in the + // middle of a transaction. + this.connectionState.commit(); + } clearLastTransactionAndSetDefaultTransactionOptions(); // Reset the readOnlyStaleness value if it is no longer compatible with the new autocommit // value. 
- if (!autocommit - && (readOnlyStaleness.getMode() == Mode.MAX_STALENESS - || readOnlyStaleness.getMode() == Mode.MIN_READ_TIMESTAMP)) { - readOnlyStaleness = TimestampBound.strong(); + if (!autocommit) { + TimestampBound readOnlyStaleness = getReadOnlyStaleness(); + if (readOnlyStaleness.getMode() == Mode.MAX_STALENESS + || readOnlyStaleness.getMode() == Mode.MIN_READ_TIMESTAMP) { + setConnectionPropertyValue(READ_ONLY_STALENESS, TimestampBound.strong()); + } } } @@ -444,7 +579,7 @@ public boolean isAutocommit() { } private boolean internalIsAutocommit() { - return this.autocommit; + return getConnectionPropertyValue(AUTOCOMMIT); } @Override @@ -458,14 +593,14 @@ public void setReadOnly(boolean readOnly) { "Cannot set read-only while in a temporary transaction"); ConnectionPreconditions.checkState( !transactionBeginMarked, "Cannot set read-only when a transaction has begun"); - this.readOnly = readOnly; + setConnectionPropertyValue(READONLY, readOnly); clearLastTransactionAndSetDefaultTransactionOptions(); } @Override public boolean isReadOnly() { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - return this.readOnly; + return getConnectionPropertyValue(READONLY); } private void clearLastTransactionAndSetDefaultTransactionOptions() { @@ -484,7 +619,7 @@ public void setAutocommitDmlMode(AutocommitDmlMode mode) { "Cannot set autocommit DML mode while not in autocommit mode or while a transaction is active"); ConnectionPreconditions.checkState( !isReadOnly(), "Cannot set autocommit DML mode for a read-only connection"); - this.autocommitDmlMode = mode; + setConnectionPropertyValue(AUTOCOMMIT_DML_MODE, mode); } @Override @@ -492,7 +627,7 @@ public AutocommitDmlMode getAutocommitDmlMode() { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); ConnectionPreconditions.checkState( !isBatchActive(), "Cannot get autocommit DML mode while in a batch"); - return this.autocommitDmlMode; + return getConnectionPropertyValue(AUTOCOMMIT_DML_MODE); 
} @Override @@ -510,14 +645,14 @@ public void setReadOnlyStaleness(TimestampBound staleness) { isAutocommit() && !inTransaction, "MAX_STALENESS and MIN_READ_TIMESTAMP are only allowed in autocommit mode"); } - this.readOnlyStaleness = staleness; + setConnectionPropertyValue(READ_ONLY_STALENESS, staleness); } @Override public TimestampBound getReadOnlyStaleness() { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); ConnectionPreconditions.checkState(!isBatchActive(), "Cannot get read-only while in a batch"); - return this.readOnlyStaleness; + return getConnectionPropertyValue(READ_ONLY_STALENESS); } @Override @@ -526,57 +661,63 @@ public void setDirectedRead(DirectedReadOptions directedReadOptions) { ConnectionPreconditions.checkState( !isTransactionStarted(), "Cannot set directed read options when a transaction has been started"); - this.directedReadOptions = directedReadOptions; + setConnectionPropertyValue(DIRECTED_READ, directedReadOptions); } @Override public DirectedReadOptions getDirectedRead() { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - return this.directedReadOptions; + return getConnectionPropertyValue(DIRECTED_READ); } @Override public void setOptimizerVersion(String optimizerVersion) { Preconditions.checkNotNull(optimizerVersion); ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - this.queryOptions = queryOptions.toBuilder().setOptimizerVersion(optimizerVersion).build(); + setConnectionPropertyValue(OPTIMIZER_VERSION, optimizerVersion); } @Override public String getOptimizerVersion() { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - return this.queryOptions.getOptimizerVersion(); + return getConnectionPropertyValue(OPTIMIZER_VERSION); } @Override public void setOptimizerStatisticsPackage(String optimizerStatisticsPackage) { Preconditions.checkNotNull(optimizerStatisticsPackage); ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - this.queryOptions = - 
queryOptions.toBuilder().setOptimizerStatisticsPackage(optimizerStatisticsPackage).build(); + setConnectionPropertyValue(OPTIMIZER_STATISTICS_PACKAGE, optimizerStatisticsPackage); } @Override public String getOptimizerStatisticsPackage() { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - return this.queryOptions.getOptimizerStatisticsPackage(); + return getConnectionPropertyValue(OPTIMIZER_STATISTICS_PACKAGE); + } + + private QueryOptions buildQueryOptions() { + return QueryOptions.newBuilder() + .setOptimizerVersion(getConnectionPropertyValue(OPTIMIZER_VERSION)) + .setOptimizerStatisticsPackage(getConnectionPropertyValue(OPTIMIZER_STATISTICS_PACKAGE)) + .build(); } @Override public void setRPCPriority(RpcPriority rpcPriority) { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - this.rpcPriority = rpcPriority; + setConnectionPropertyValue(RPC_PRIORITY, rpcPriority); } @Override public RpcPriority getRPCPriority() { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - return this.rpcPriority; + return getConnectionPropertyValue(RPC_PRIORITY); } @Override public DdlInTransactionMode getDdlInTransactionMode() { - return this.ddlInTransactionMode; + return getConnectionPropertyValue(DDL_IN_TRANSACTION_MODE); } @Override @@ -586,7 +727,7 @@ public void setDdlInTransactionMode(DdlInTransactionMode ddlInTransactionMode) { !isBatchActive(), "Cannot set DdlInTransactionMode while in a batch"); ConnectionPreconditions.checkState( !isTransactionStarted(), "Cannot set DdlInTransactionMode while a transaction is active"); - this.ddlInTransactionMode = Preconditions.checkNotNull(ddlInTransactionMode); + setConnectionPropertyValue(DDL_IN_TRANSACTION_MODE, ddlInTransactionMode); } @Override @@ -695,6 +836,70 @@ public void setStatementTag(String tag) { this.statementTag = tag; } + @Override + public boolean isExcludeTxnFromChangeStreams() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + 
ConnectionPreconditions.checkState(!isDdlBatchActive(), "This connection is in a DDL batch"); + return excludeTxnFromChangeStreams; + } + + @Override + public void setExcludeTxnFromChangeStreams(boolean excludeTxnFromChangeStreams) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Cannot set exclude_txn_from_change_streams while in a batch"); + ConnectionPreconditions.checkState( + !isTransactionStarted(), + "exclude_txn_from_change_streams cannot be set after the transaction has started"); + this.excludeTxnFromChangeStreams = excludeTxnFromChangeStreams; + } + + @Override + public byte[] getProtoDescriptors() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + if (this.protoDescriptors == null && this.protoDescriptorsFilePath != null) { + // Read from file if filepath is valid + try { + File protoDescriptorsFile = new File(this.protoDescriptorsFilePath); + if (!protoDescriptorsFile.isFile()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + String.format( + "File %s is not a valid proto descriptors file", this.protoDescriptorsFilePath)); + } + InputStream pdStream = new FileInputStream(protoDescriptorsFile); + this.protoDescriptors = ByteArray.copyFrom(pdStream).toByteArray(); + } catch (Exception exception) { + throw SpannerExceptionFactory.newSpannerException(exception); + } + } + return this.protoDescriptors; + } + + @Override + public void setProtoDescriptors(@Nonnull byte[] protoDescriptors) { + Preconditions.checkNotNull(protoDescriptors); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Proto descriptors cannot be set when a batch is active"); + this.protoDescriptors = protoDescriptors; + this.protoDescriptorsFilePath = null; + } + + void setProtoDescriptorsFilePath(@Nonnull String protoDescriptorsFilePath) { + 
Preconditions.checkNotNull(protoDescriptorsFilePath); + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isBatchActive(), "Proto descriptors file path cannot be set when a batch is active"); + this.protoDescriptorsFilePath = protoDescriptorsFilePath; + this.protoDescriptors = null; + } + + String getProtoDescriptorsFilePath() { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return this.protoDescriptorsFilePath; + } + /** * Throws an {@link SpannerException} with code {@link ErrorCode#FAILED_PRECONDITION} if the * current state of this connection does not allow changing the setting for retryAbortsInternally. @@ -709,13 +914,13 @@ private void checkSetRetryAbortsInternallyAvailable() { @Override public boolean isRetryAbortsInternally() { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - return retryAbortsInternally; + return getConnectionPropertyValue(RETRY_ABORTS_INTERNALLY); } @Override public void setRetryAbortsInternally(boolean retryAbortsInternally) { checkSetRetryAbortsInternallyAvailable(); - this.retryAbortsInternally = retryAbortsInternally; + setConnectionPropertyValue(RETRY_ABORTS_INTERNALLY, retryAbortsInternally); } @Override @@ -761,6 +966,10 @@ private boolean internalIsTransactionStarted() { && this.currentUnitOfWork.getState() == UnitOfWorkState.STARTED; } + private boolean hasTransactionalChanges() { + return internalIsTransactionStarted() || this.connectionState.hasTransactionalChanges(); + } + @Override public Timestamp getReadTimestamp() { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); @@ -804,26 +1013,31 @@ CommitResponse getCommitResponseOrNull() { @Override public void setReturnCommitStats(boolean returnCommitStats) { + setReturnCommitStats(returnCommitStats, /* local = */ false); + } + + @VisibleForTesting + void setReturnCommitStats(boolean returnCommitStats, boolean local) { 
ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - this.returnCommitStats = returnCommitStats; + setConnectionPropertyValue(RETURN_COMMIT_STATS, returnCommitStats, local); } @Override public boolean isReturnCommitStats() { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - return this.returnCommitStats; + return getConnectionPropertyValue(RETURN_COMMIT_STATS); } @Override public void setMaxCommitDelay(Duration maxCommitDelay) { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - this.maxCommitDelay = maxCommitDelay; + setConnectionPropertyValue(MAX_COMMIT_DELAY, maxCommitDelay); } @Override public Duration getMaxCommitDelay() { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - return this.maxCommitDelay; + return getConnectionPropertyValue(MAX_COMMIT_DELAY); } @Override @@ -833,13 +1047,28 @@ public void setDelayTransactionStartUntilFirstWrite( ConnectionPreconditions.checkState( !isTransactionStarted(), "Cannot set DelayTransactionStartUntilFirstWrite while a transaction is active"); - this.delayTransactionStartUntilFirstWrite = delayTransactionStartUntilFirstWrite; + setConnectionPropertyValue( + DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE, delayTransactionStartUntilFirstWrite); } @Override public boolean isDelayTransactionStartUntilFirstWrite() { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - return this.delayTransactionStartUntilFirstWrite; + return getConnectionPropertyValue(DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE); + } + + @Override + public void setKeepTransactionAlive(boolean keepTransactionAlive) { + ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + ConnectionPreconditions.checkState( + !isTransactionStarted(), "Cannot set KeepTransactionAlive while a transaction is active"); + setConnectionPropertyValue(KEEP_TRANSACTION_ALIVE, keepTransactionAlive); + } + + @Override + public boolean isKeepTransactionAlive() { + 
ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); + return getConnectionPropertyValue(KEEP_TRANSACTION_ALIVE); } /** Resets this connection to its default transaction options. */ @@ -851,6 +1080,7 @@ private void setDefaultTransactionOptions() { : UnitOfWorkType.READ_WRITE_TRANSACTION; batchMode = BatchMode.NONE; transactionTag = null; + excludeTxnFromChangeStreams = false; } else { popUnitOfWorkFromTransactionStack(); } @@ -884,10 +1114,22 @@ private interface EndTransactionMethod { ApiFuture endAsync(CallType callType, UnitOfWork t); } - private static final class Commit implements EndTransactionMethod { + private final class Commit implements EndTransactionMethod { @Override public ApiFuture endAsync(CallType callType, UnitOfWork t) { - return t.commitAsync(callType); + return t.commitAsync( + callType, + new EndTransactionCallback() { + @Override + public void onSuccess() { + ConnectionImpl.this.connectionState.commit(); + } + + @Override + public void onFailure() { + ConnectionImpl.this.connectionState.rollback(); + } + }); } } @@ -908,10 +1150,22 @@ private ApiFuture commitAsync(CallType callType) { return endCurrentTransactionAsync(callType, commit); } - private static final class Rollback implements EndTransactionMethod { + private final class Rollback implements EndTransactionMethod { @Override public ApiFuture endAsync(CallType callType, UnitOfWork t) { - return t.rollbackAsync(callType); + return t.rollbackAsync( + callType, + new EndTransactionCallback() { + @Override + public void onSuccess() { + ConnectionImpl.this.connectionState.rollback(); + } + + @Override + public void onFailure() { + ConnectionImpl.this.connectionState.rollback(); + } + }); } } @@ -940,7 +1194,7 @@ private ApiFuture endCurrentTransactionAsync( statementTag == null, "Statement tags are not supported for COMMIT or ROLLBACK"); ApiFuture res; try { - if (isTransactionStarted()) { + if (hasTransactionalChanges()) { res = endTransactionMethod.endAsync(callType, 
getCurrentUnitOfWorkOrStartNewUnitOfWork()); } else { this.currentUnitOfWork = null; @@ -958,7 +1212,7 @@ private ApiFuture endCurrentTransactionAsync( @Override public SavepointSupport getSavepointSupport() { - return this.savepointSupport; + return getConnectionPropertyValue(SAVEPOINT_SUPPORT); } @Override @@ -968,12 +1222,13 @@ public void setSavepointSupport(SavepointSupport savepointSupport) { !isBatchActive(), "Cannot set SavepointSupport while in a batch"); ConnectionPreconditions.checkState( !isTransactionStarted(), "Cannot set SavepointSupport while a transaction is active"); - this.savepointSupport = savepointSupport; + setConnectionPropertyValue(SAVEPOINT_SUPPORT, savepointSupport); } @Override public void savepoint(String name) { ConnectionPreconditions.checkState(isInTransaction(), "This connection has no transaction"); + SavepointSupport savepointSupport = getSavepointSupport(); ConnectionPreconditions.checkState( savepointSupport.isSavepointCreationAllowed(), "This connection does not allow the creation of savepoints. 
Current value of SavepointSupport: " @@ -993,7 +1248,7 @@ public void rollbackToSavepoint(String name) { ConnectionPreconditions.checkState( isTransactionStarted(), "This connection has no active transaction"); getCurrentUnitOfWorkOrStartNewUnitOfWork() - .rollbackToSavepoint(checkValidIdentifier(name), savepointSupport); + .rollbackToSavepoint(checkValidIdentifier(name), getSavepointSupport()); } @Override @@ -1010,7 +1265,7 @@ public StatementResult execute(Statement statement, Set allowedResul private StatementResult internalExecute( Statement statement, @Nullable Set allowedResultTypes) { ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - ParsedStatement parsedStatement = getStatementParser().parse(statement, this.queryOptions); + ParsedStatement parsedStatement = getStatementParser().parse(statement, buildQueryOptions()); checkResultTypeAllowed(parsedStatement, allowedResultTypes); switch (parsedStatement.getType()) { case CLIENT_SIDE: @@ -1089,7 +1344,7 @@ private static ResultType getResultType(ParsedStatement parsedStatement) { public AsyncStatementResult executeAsync(Statement statement) { Preconditions.checkNotNull(statement); ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - ParsedStatement parsedStatement = getStatementParser().parse(statement, this.queryOptions); + ParsedStatement parsedStatement = getStatementParser().parse(statement, buildQueryOptions()); switch (parsedStatement.getType()) { case CLIENT_SIDE: return AsyncStatementResultImpl.of( @@ -1135,38 +1390,46 @@ public ResultSet analyzeQuery(Statement query, QueryAnalyzeMode queryMode) { @Override public void setDataBoostEnabled(boolean dataBoostEnabled) { - this.dataBoostEnabled = dataBoostEnabled; + setConnectionPropertyValue(DATA_BOOST_ENABLED, dataBoostEnabled); } @Override public boolean isDataBoostEnabled() { - return this.dataBoostEnabled; + return getConnectionPropertyValue(DATA_BOOST_ENABLED); } @Override public void setAutoPartitionMode(boolean 
autoPartitionMode) { - this.autoPartitionMode = autoPartitionMode; + setConnectionPropertyValue(AUTO_PARTITION_MODE, autoPartitionMode); } + /** + * autoPartitionMode will force this connection to execute all queries as partitioned queries. If + * a query cannot be executed as a partitioned query, for example if it is not partitionable, then + * the query will fail. This mode is intended for integrations with frameworks that should always + * use partitioned queries, and that do not support executing custom SQL statements. This setting + * can be used in combination with the dataBoostEnabled flag to force all queries to use data + * boost. + */ @Override public boolean isAutoPartitionMode() { - return this.autoPartitionMode; + return getConnectionPropertyValue(AUTO_PARTITION_MODE); } @Override public void setMaxPartitions(int maxPartitions) { - this.maxPartitions = maxPartitions; + setConnectionPropertyValue(MAX_PARTITIONS, maxPartitions); } @Override public int getMaxPartitions() { - return this.maxPartitions; + return getConnectionPropertyValue(MAX_PARTITIONS); } @Override public ResultSet partitionQuery( Statement query, PartitionOptions partitionOptions, QueryOption... 
options) { - ParsedStatement parsedStatement = getStatementParser().parse(query, this.queryOptions); + ParsedStatement parsedStatement = getStatementParser().parse(query, buildQueryOptions()); if (parsedStatement.getType() != StatementType.QUERY) { throw SpannerExceptionFactory.newSpannerException( ErrorCode.INVALID_ARGUMENT, @@ -1187,7 +1450,7 @@ public ResultSet partitionQuery( private PartitionOptions getEffectivePartitionOptions( PartitionOptions callSpecificPartitionOptions) { - if (maxPartitions == 0) { + if (getMaxPartitions() == 0) { if (callSpecificPartitionOptions == null) { return PartitionOptions.newBuilder().build(); } else { @@ -1201,11 +1464,11 @@ private PartitionOptions getEffectivePartitionOptions( if (callSpecificPartitionOptions != null && callSpecificPartitionOptions.getPartitionSizeBytes() > 0L) { return PartitionOptions.newBuilder() - .setMaxPartitions(maxPartitions) + .setMaxPartitions(getMaxPartitions()) .setPartitionSizeBytes(callSpecificPartitionOptions.getPartitionSizeBytes()) .build(); } - return PartitionOptions.newBuilder().setMaxPartitions(maxPartitions).build(); + return PartitionOptions.newBuilder().setMaxPartitions(getMaxPartitions()).build(); } @Override @@ -1220,12 +1483,12 @@ public ResultSet runPartition(String encodedPartitionId) { @Override public void setMaxPartitionedParallelism(int maxThreads) { Preconditions.checkArgument(maxThreads >= 0, "maxThreads must be >=0"); - this.maxPartitionedParallelism = maxThreads; + setConnectionPropertyValue(MAX_PARTITIONED_PARALLELISM, maxThreads); } @Override public int getMaxPartitionedParallelism() { - return this.maxPartitionedParallelism; + return getConnectionPropertyValue(MAX_PARTITIONED_PARALLELISM); } @Override @@ -1239,7 +1502,7 @@ public PartitionedQueryResultSet runPartitionedQuery( } // parallelism=0 means 'dynamically choose based on the number of available processors and the // number of partitions'. 
- return new MergedResultSet(this, partitionIds, maxPartitionedParallelism); + return new MergedResultSet(this, partitionIds, getMaxPartitionedParallelism()); } /** @@ -1251,7 +1514,7 @@ private ResultSet parseAndExecuteQuery( Preconditions.checkNotNull(query); Preconditions.checkNotNull(analyzeMode); ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - ParsedStatement parsedStatement = getStatementParser().parse(query, this.queryOptions); + ParsedStatement parsedStatement = getStatementParser().parse(query, buildQueryOptions()); if (parsedStatement.isQuery() || parsedStatement.isUpdate()) { switch (parsedStatement.getType()) { case CLIENT_SIDE: @@ -1290,7 +1553,7 @@ private AsyncResultSet parseAndExecuteQueryAsync( CallType callType, Statement query, AnalyzeMode analyzeMode, QueryOption... options) { Preconditions.checkNotNull(query); ConnectionPreconditions.checkState(!isClosed(), CLOSED_ERROR_MSG); - ParsedStatement parsedStatement = getStatementParser().parse(query, this.queryOptions); + ParsedStatement parsedStatement = getStatementParser().parse(query, buildQueryOptions()); if (parsedStatement.isQuery() || parsedStatement.isUpdate()) { switch (parsedStatement.getType()) { case CLIENT_SIDE: @@ -1505,7 +1768,7 @@ private QueryOption[] concat( } private QueryOption[] mergeDataBoost(QueryOption... options) { - if (this.dataBoostEnabled) { + if (isDataBoostEnabled()) { options = appendQueryOption(options, Options.dataBoostEnabled(true)); } return options; @@ -1521,13 +1784,16 @@ private QueryOption[] mergeQueryStatementTag(QueryOption... options) { private QueryOption[] mergeQueryRequestOptions( ParsedStatement parsedStatement, QueryOption... 
options) { - if (this.rpcPriority != null) { - options = appendQueryOption(options, Options.priority(this.rpcPriority)); + if (getConnectionPropertyValue(RPC_PRIORITY) != null) { + options = + appendQueryOption(options, Options.priority(getConnectionPropertyValue(RPC_PRIORITY))); } - if (this.directedReadOptions != null - && currentUnitOfWork != null - && currentUnitOfWork.supportsDirectedReads(parsedStatement)) { - options = appendQueryOption(options, Options.directedRead(this.directedReadOptions)); + if (currentUnitOfWork != null + && currentUnitOfWork.supportsDirectedReads(parsedStatement) + && getConnectionPropertyValue(DIRECTED_READ) != null) { + options = + appendQueryOption( + options, Options.directedRead(getConnectionPropertyValue(DIRECTED_READ))); } return options; } @@ -1557,13 +1823,13 @@ private UpdateOption[] mergeUpdateStatementTag(UpdateOption... options) { } private UpdateOption[] mergeUpdateRequestOptions(UpdateOption... options) { - if (this.rpcPriority != null) { + if (getConnectionPropertyValue(RPC_PRIORITY) != null) { // Shortcut for the most common scenario. 
if (options == null || options.length == 0) { - options = new UpdateOption[] {Options.priority(this.rpcPriority)}; + options = new UpdateOption[] {Options.priority(getConnectionPropertyValue(RPC_PRIORITY))}; } else { options = Arrays.copyOf(options, options.length + 1); - options[options.length - 1] = Options.priority(this.rpcPriority); + options[options.length - 1] = Options.priority(getConnectionPropertyValue(RPC_PRIORITY)); } } return options; @@ -1582,7 +1848,7 @@ private ResultSet internalExecuteQuery( boolean isInternalMetadataQuery = isInternalMetadataQuery(options); QueryOption[] combinedOptions = concat(statement.getOptionsFromHints(), options); UnitOfWork transaction = getCurrentUnitOfWorkOrStartNewUnitOfWork(isInternalMetadataQuery); - if (autoPartitionMode + if (isAutoPartitionMode() && statement.getType() == StatementType.QUERY && !isInternalMetadataQuery) { return runPartitionedQuery( @@ -1606,7 +1872,7 @@ private AsyncResultSet internalExecuteQueryAsync( || (statement.getType() == StatementType.UPDATE && statement.hasReturningClause()), "Statement must be a query or DML with returning clause."); ConnectionPreconditions.checkState( - !(autoPartitionMode && statement.getType() == StatementType.QUERY), + !(isAutoPartitionMode() && statement.getType() == StatementType.QUERY), "Partitioned queries cannot be executed asynchronously"); boolean isInternalMetadataQuery = isInternalMetadataQuery(options); QueryOption[] combinedOptions = concat(statement.getOptionsFromHints(), options); @@ -1686,65 +1952,100 @@ UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork( createNewUnitOfWork( /* isInternalMetadataQuery = */ false, /* forceSingleUse = */ statementType == StatementType.DDL - && this.ddlInTransactionMode != DdlInTransactionMode.FAIL - && !this.transactionBeginMarked); + && getDdlInTransactionMode() != DdlInTransactionMode.FAIL + && !this.transactionBeginMarked, + statementType); } return this.currentUnitOfWork; } + private Span 
createSpanForUnitOfWork(String name) { + return tracer + .spanBuilder( + // We can memoize this, as it is a STARTUP property. + Suppliers.memoize(() -> this.connectionState.getValue(TRACING_PREFIX).getValue()).get() + + "." + + name) + .setAllAttributes(getOpenTelemetryAttributes()) + .startSpan(); + } + void maybeAutoCommitCurrentTransaction(StatementType statementType) { if (this.currentUnitOfWork instanceof ReadWriteTransaction && this.currentUnitOfWork.isActive() && statementType == StatementType.DDL - && this.ddlInTransactionMode == DdlInTransactionMode.AUTO_COMMIT_TRANSACTION) { + && getDdlInTransactionMode() == DdlInTransactionMode.AUTO_COMMIT_TRANSACTION) { commit(); } } @VisibleForTesting UnitOfWork createNewUnitOfWork(boolean isInternalMetadataQuery, boolean forceSingleUse) { + return createNewUnitOfWork(isInternalMetadataQuery, forceSingleUse, null); + } + + @VisibleForTesting + UnitOfWork createNewUnitOfWork( + boolean isInternalMetadataQuery, boolean forceSingleUse, StatementType statementType) { if (isInternalMetadataQuery || (isAutocommit() && !isInTransaction() && !isInBatch()) || forceSingleUse) { - return SingleUseTransaction.newBuilder() - .setInternalMetadataQuery(isInternalMetadataQuery) - .setDdlClient(ddlClient) - .setDatabaseClient(dbClient) - .setBatchClient(batchClient) - .setReadOnly(isReadOnly()) - .setReadOnlyStaleness(readOnlyStaleness) - .setAutocommitDmlMode(autocommitDmlMode) - .setReturnCommitStats(returnCommitStats) - .setMaxCommitDelay(maxCommitDelay) - .setStatementTimeout(statementTimeout) - .withStatementExecutor(statementExecutor) - .build(); + SingleUseTransaction singleUseTransaction = + SingleUseTransaction.newBuilder() + .setInternalMetadataQuery(isInternalMetadataQuery) + .setDdlClient(ddlClient) + .setDatabaseClient(dbClient) + .setBatchClient(batchClient) + .setReadOnly(getConnectionPropertyValue(READONLY)) + .setReadOnlyStaleness(getConnectionPropertyValue(READ_ONLY_STALENESS)) + 
.setAutocommitDmlMode(getConnectionPropertyValue(AUTOCOMMIT_DML_MODE)) + .setReturnCommitStats(getConnectionPropertyValue(RETURN_COMMIT_STATS)) + .setExcludeTxnFromChangeStreams(excludeTxnFromChangeStreams) + .setMaxCommitDelay(getConnectionPropertyValue(MAX_COMMIT_DELAY)) + .setStatementTimeout(statementTimeout) + .withStatementExecutor(statementExecutor) + .setSpan( + createSpanForUnitOfWork( + statementType == StatementType.DDL ? DDL_STATEMENT : SINGLE_USE_TRANSACTION)) + .setProtoDescriptors(getProtoDescriptors()) + .build(); + if (!isInternalMetadataQuery && !forceSingleUse) { + // Reset the transaction options after starting a single-use transaction. + setDefaultTransactionOptions(); + } + return singleUseTransaction; } else { switch (getUnitOfWorkType()) { case READ_ONLY_TRANSACTION: return ReadOnlyTransaction.newBuilder() .setDatabaseClient(dbClient) .setBatchClient(batchClient) - .setReadOnlyStaleness(readOnlyStaleness) + .setReadOnlyStaleness(getConnectionPropertyValue(READ_ONLY_STALENESS)) .setStatementTimeout(statementTimeout) .withStatementExecutor(statementExecutor) .setTransactionTag(transactionTag) - .setRpcPriority(rpcPriority) + .setRpcPriority(getConnectionPropertyValue(RPC_PRIORITY)) + .setSpan(createSpanForUnitOfWork(READ_ONLY_TRANSACTION)) .build(); case READ_WRITE_TRANSACTION: return ReadWriteTransaction.newBuilder() + .setUsesEmulator(options.usesEmulator()) .setUseAutoSavepointsForEmulator(options.useAutoSavepointsForEmulator()) .setDatabaseClient(dbClient) - .setDelayTransactionStartUntilFirstWrite(delayTransactionStartUntilFirstWrite) - .setRetryAbortsInternally(retryAbortsInternally) - .setSavepointSupport(savepointSupport) - .setReturnCommitStats(returnCommitStats) - .setMaxCommitDelay(maxCommitDelay) + .setDelayTransactionStartUntilFirstWrite( + getConnectionPropertyValue(DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE)) + .setKeepTransactionAlive(getConnectionPropertyValue(KEEP_TRANSACTION_ALIVE)) + 
.setRetryAbortsInternally(getConnectionPropertyValue(RETRY_ABORTS_INTERNALLY)) + .setSavepointSupport(getConnectionPropertyValue(SAVEPOINT_SUPPORT)) + .setReturnCommitStats(getConnectionPropertyValue(RETURN_COMMIT_STATS)) + .setMaxCommitDelay(getConnectionPropertyValue(MAX_COMMIT_DELAY)) .setTransactionRetryListeners(transactionRetryListeners) .setStatementTimeout(statementTimeout) .withStatementExecutor(statementExecutor) .setTransactionTag(transactionTag) - .setRpcPriority(rpcPriority) + .setExcludeTxnFromChangeStreams(excludeTxnFromChangeStreams) + .setRpcPriority(getConnectionPropertyValue(RPC_PRIORITY)) + .setSpan(createSpanForUnitOfWork(READ_WRITE_TRANSACTION)) .build(); case DML_BATCH: // A DML batch can run inside the current transaction. It should therefore only @@ -1755,7 +2056,10 @@ UnitOfWork createNewUnitOfWork(boolean isInternalMetadataQuery, boolean forceSin .setStatementTimeout(statementTimeout) .withStatementExecutor(statementExecutor) .setStatementTag(statementTag) - .setRpcPriority(rpcPriority) + .setExcludeTxnFromChangeStreams(excludeTxnFromChangeStreams) + .setRpcPriority(getConnectionPropertyValue(RPC_PRIORITY)) + // Use the transaction Span for the DML batch. 
+ .setSpan(transactionStack.peek().getSpan()) .build(); case DDL_BATCH: return DdlBatch.newBuilder() @@ -1763,6 +2067,8 @@ UnitOfWork createNewUnitOfWork(boolean isInternalMetadataQuery, boolean forceSin .setDatabaseClient(dbClient) .setStatementTimeout(statementTimeout) .withStatementExecutor(statementExecutor) + .setSpan(createSpanForUnitOfWork(DDL_BATCH)) + .setProtoDescriptors(getProtoDescriptors()) .build(); default: } @@ -1786,7 +2092,11 @@ private void popUnitOfWorkFromTransactionStack() { } private ApiFuture executeDdlAsync(CallType callType, ParsedStatement ddl) { - return getOrStartDdlUnitOfWork().executeDdlAsync(callType, ddl); + ApiFuture result = getOrStartDdlUnitOfWork().executeDdlAsync(callType, ddl); + // reset proto descriptors after executing a DDL statement + this.protoDescriptors = null; + this.protoDescriptorsFilePath = null; + return result; } @Override @@ -1886,6 +2196,11 @@ public ApiFuture runBatchAsync() { } return ApiFutures.immediateFuture(new long[0]); } finally { + if (isDdlBatchActive()) { + // reset proto descriptors after executing a DDL batch + this.protoDescriptors = null; + this.protoDescriptorsFilePath = null; + } this.batchMode = BatchMode.NONE; setDefaultTransactionOptions(); } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionOptions.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionOptions.java index 59c30789afb..1795ad172e2 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionOptions.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionOptions.java @@ -16,6 +16,41 @@ package com.google.cloud.spanner.connection; +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTOCOMMIT; +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTO_CONFIG_EMULATOR; +import static 
com.google.cloud.spanner.connection.ConnectionProperties.AUTO_PARTITION_MODE; +import static com.google.cloud.spanner.connection.ConnectionProperties.CHANNEL_PROVIDER; +import static com.google.cloud.spanner.connection.ConnectionProperties.CREDENTIALS_PROVIDER; +import static com.google.cloud.spanner.connection.ConnectionProperties.CREDENTIALS_URL; +import static com.google.cloud.spanner.connection.ConnectionProperties.DATABASE_ROLE; +import static com.google.cloud.spanner.connection.ConnectionProperties.DATA_BOOST_ENABLED; +import static com.google.cloud.spanner.connection.ConnectionProperties.DIALECT; +import static com.google.cloud.spanner.connection.ConnectionProperties.ENABLE_API_TRACING; +import static com.google.cloud.spanner.connection.ConnectionProperties.ENABLE_EXTENDED_TRACING; +import static com.google.cloud.spanner.connection.ConnectionProperties.ENCODED_CREDENTIALS; +import static com.google.cloud.spanner.connection.ConnectionProperties.ENDPOINT; +import static com.google.cloud.spanner.connection.ConnectionProperties.LENIENT; +import static com.google.cloud.spanner.connection.ConnectionProperties.MAX_COMMIT_DELAY; +import static com.google.cloud.spanner.connection.ConnectionProperties.MAX_PARTITIONED_PARALLELISM; +import static com.google.cloud.spanner.connection.ConnectionProperties.MAX_PARTITIONS; +import static com.google.cloud.spanner.connection.ConnectionProperties.MAX_SESSIONS; +import static com.google.cloud.spanner.connection.ConnectionProperties.MIN_SESSIONS; +import static com.google.cloud.spanner.connection.ConnectionProperties.NUM_CHANNELS; +import static com.google.cloud.spanner.connection.ConnectionProperties.OAUTH_TOKEN; +import static com.google.cloud.spanner.connection.ConnectionProperties.READONLY; +import static com.google.cloud.spanner.connection.ConnectionProperties.RETRY_ABORTS_INTERNALLY; +import static com.google.cloud.spanner.connection.ConnectionProperties.RETURN_COMMIT_STATS; +import static 
com.google.cloud.spanner.connection.ConnectionProperties.ROUTE_TO_LEADER; +import static com.google.cloud.spanner.connection.ConnectionProperties.TRACING_PREFIX; +import static com.google.cloud.spanner.connection.ConnectionProperties.TRACK_CONNECTION_LEAKS; +import static com.google.cloud.spanner.connection.ConnectionProperties.TRACK_SESSION_LEAKS; +import static com.google.cloud.spanner.connection.ConnectionProperties.USER_AGENT; +import static com.google.cloud.spanner.connection.ConnectionProperties.USE_AUTO_SAVEPOINTS_FOR_EMULATOR; +import static com.google.cloud.spanner.connection.ConnectionProperties.USE_PLAIN_TEXT; +import static com.google.cloud.spanner.connection.ConnectionProperties.USE_VIRTUAL_GRPC_TRANSPORT_THREADS; +import static com.google.cloud.spanner.connection.ConnectionProperties.USE_VIRTUAL_THREADS; +import static com.google.cloud.spanner.connection.ConnectionPropertyValue.cast; + import com.google.api.core.InternalApi; import com.google.api.gax.core.CredentialsProvider; import com.google.api.gax.rpc.TransportChannelProvider; @@ -37,19 +72,20 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.base.Strings; +import com.google.common.base.Suppliers; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.Sets; -import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; import io.opentelemetry.api.OpenTelemetry; import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; import java.net.URL; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -86,7 +122,12 @@ */ @InternalApi public class ConnectionOptions { - /** Supported connection properties that can be 
included in the connection URI. */ + /** + * Supported connection properties that can be included in the connection URI. + * + * @deprecated Replaced by {@link com.google.cloud.spanner.connection.ConnectionProperty}. + */ + @Deprecated public static class ConnectionProperty { private static final String[] BOOLEAN_VALUES = new String[] {"true", "false"}; private final String name; @@ -100,9 +141,12 @@ private static ConnectionProperty createStringProperty(String name, String descr } private static ConnectionProperty createBooleanProperty( - String name, String description, boolean defaultValue) { + String name, String description, Boolean defaultValue) { return new ConnectionProperty( - name, description, String.valueOf(defaultValue), BOOLEAN_VALUES); + name, + description, + defaultValue == null ? "" : String.valueOf(defaultValue), + BOOLEAN_VALUES); } private static ConnectionProperty createIntProperty( @@ -163,38 +207,48 @@ public String[] getValidValues() { } } + /** + * Set this system property to true to enable transactional connection state by default for + * PostgreSQL-dialect databases. The default is currently false. 
+ */ + public static String ENABLE_TRANSACTIONAL_CONNECTION_STATE_FOR_POSTGRESQL_PROPERTY = + "spanner.enable_transactional_connection_state_for_postgresql"; + private static final LocalConnectionChecker LOCAL_CONNECTION_CHECKER = new LocalConnectionChecker(); - private static final boolean DEFAULT_USE_PLAIN_TEXT = false; + static final boolean DEFAULT_USE_PLAIN_TEXT = false; static final boolean DEFAULT_AUTOCOMMIT = true; static final boolean DEFAULT_READONLY = false; static final boolean DEFAULT_RETRY_ABORTS_INTERNALLY = true; static final boolean DEFAULT_USE_VIRTUAL_THREADS = false; static final boolean DEFAULT_USE_VIRTUAL_GRPC_TRANSPORT_THREADS = false; - private static final String DEFAULT_CREDENTIALS = null; - private static final String DEFAULT_OAUTH_TOKEN = null; - private static final String DEFAULT_MIN_SESSIONS = null; - private static final String DEFAULT_MAX_SESSIONS = null; - private static final String DEFAULT_NUM_CHANNELS = null; + static final String DEFAULT_CREDENTIALS = null; + static final String DEFAULT_OAUTH_TOKEN = null; + static final Integer DEFAULT_MIN_SESSIONS = null; + static final Integer DEFAULT_MAX_SESSIONS = null; + static final Integer DEFAULT_NUM_CHANNELS = null; static final String DEFAULT_ENDPOINT = null; - private static final String DEFAULT_CHANNEL_PROVIDER = null; - private static final String DEFAULT_DATABASE_ROLE = null; - private static final String DEFAULT_USER_AGENT = null; - private static final String DEFAULT_OPTIMIZER_VERSION = ""; - private static final String DEFAULT_OPTIMIZER_STATISTICS_PACKAGE = ""; - private static final RpcPriority DEFAULT_RPC_PRIORITY = null; - private static final DdlInTransactionMode DEFAULT_DDL_IN_TRANSACTION_MODE = + static final String DEFAULT_CHANNEL_PROVIDER = null; + static final String DEFAULT_DATABASE_ROLE = null; + static final String DEFAULT_USER_AGENT = null; + static final String DEFAULT_OPTIMIZER_VERSION = ""; + static final String DEFAULT_OPTIMIZER_STATISTICS_PACKAGE = ""; + 
static final RpcPriority DEFAULT_RPC_PRIORITY = null; + static final DdlInTransactionMode DEFAULT_DDL_IN_TRANSACTION_MODE = DdlInTransactionMode.ALLOW_IN_EMPTY_TRANSACTION; - private static final boolean DEFAULT_RETURN_COMMIT_STATS = false; - private static final boolean DEFAULT_LENIENT = false; - private static final boolean DEFAULT_ROUTE_TO_LEADER = true; - private static final boolean DEFAULT_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = false; - private static final boolean DEFAULT_TRACK_SESSION_LEAKS = true; - private static final boolean DEFAULT_TRACK_CONNECTION_LEAKS = true; - private static final boolean DEFAULT_DATA_BOOST_ENABLED = false; - private static final boolean DEFAULT_AUTO_PARTITION_MODE = false; - private static final int DEFAULT_MAX_PARTITIONS = 0; - private static final int DEFAULT_MAX_PARTITIONED_PARALLELISM = 1; + static final boolean DEFAULT_RETURN_COMMIT_STATS = false; + static final boolean DEFAULT_LENIENT = false; + static final boolean DEFAULT_ROUTE_TO_LEADER = true; + static final boolean DEFAULT_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = false; + static final boolean DEFAULT_KEEP_TRANSACTION_ALIVE = false; + static final boolean DEFAULT_TRACK_SESSION_LEAKS = true; + static final boolean DEFAULT_TRACK_CONNECTION_LEAKS = true; + static final boolean DEFAULT_DATA_BOOST_ENABLED = false; + static final boolean DEFAULT_AUTO_PARTITION_MODE = false; + static final int DEFAULT_MAX_PARTITIONS = 0; + static final int DEFAULT_MAX_PARTITIONED_PARALLELISM = 1; + static final Boolean DEFAULT_ENABLE_EXTENDED_TRACING = null; + static final Boolean DEFAULT_ENABLE_API_TRACING = null; private static final String PLAIN_TEXT_PROTOCOL = "http:"; private static final String HOST_PROTOCOL = "https:"; @@ -202,7 +256,7 @@ public String[] getValidValues() { private static final String SPANNER_EMULATOR_HOST_ENV_VAR = "SPANNER_EMULATOR_HOST"; private static final String DEFAULT_EMULATOR_HOST = "http://localhost:9010"; /** Use plain text is only for local testing 
purposes. */ - private static final String USE_PLAIN_TEXT_PROPERTY_NAME = "usePlainText"; + static final String USE_PLAIN_TEXT_PROPERTY_NAME = "usePlainText"; /** Name of the 'autocommit' connection property. */ public static final String AUTOCOMMIT_PROPERTY_NAME = "autocommit"; /** Name of the 'readonly' connection property. */ @@ -245,12 +299,11 @@ public String[] getValidValues() { public static final String ENABLE_CHANNEL_PROVIDER_SYSTEM_PROPERTY = "ENABLE_CHANNEL_PROVIDER"; /** Custom user agent string is only for other Google libraries. */ - private static final String USER_AGENT_PROPERTY_NAME = "userAgent"; + static final String USER_AGENT_PROPERTY_NAME = "userAgent"; /** Query optimizer version to use for a connection. */ - private static final String OPTIMIZER_VERSION_PROPERTY_NAME = "optimizerVersion"; + static final String OPTIMIZER_VERSION_PROPERTY_NAME = "optimizerVersion"; /** Query optimizer statistics package to use for a connection. */ - private static final String OPTIMIZER_STATISTICS_PACKAGE_PROPERTY_NAME = - "optimizerStatisticsPackage"; + static final String OPTIMIZER_STATISTICS_PACKAGE_PROPERTY_NAME = "optimizerStatisticsPackage"; /** Name of the 'lenientMode' connection property. */ public static final String LENIENT_PROPERTY_NAME = "lenient"; /** Name of the 'rpcPriority' connection property. */ @@ -258,12 +311,14 @@ public String[] getValidValues() { public static final String DDL_IN_TRANSACTION_MODE_PROPERTY_NAME = "ddlInTransactionMode"; /** Dialect to use for a connection. */ - private static final String DIALECT_PROPERTY_NAME = "dialect"; + static final String DIALECT_PROPERTY_NAME = "dialect"; /** Name of the 'databaseRole' connection property. */ public static final String DATABASE_ROLE_PROPERTY_NAME = "databaseRole"; /** Name of the 'delay transaction start until first write' property. 
*/ public static final String DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE_NAME = "delayTransactionStartUntilFirstWrite"; + /** Name of the 'keep transaction alive' property. */ + public static final String KEEP_TRANSACTION_ALIVE_PROPERTY_NAME = "keepTransactionAlive"; /** Name of the 'trackStackTraceOfSessionCheckout' connection property. */ public static final String TRACK_SESSION_LEAKS_PROPERTY_NAME = "trackSessionLeaks"; /** Name of the 'trackStackTraceOfConnectionCreation' connection property. */ @@ -275,6 +330,9 @@ public String[] getValidValues() { public static final String MAX_PARTITIONED_PARALLELISM_PROPERTY_NAME = "maxPartitionedParallelism"; + public static final String ENABLE_EXTENDED_TRACING_PROPERTY_NAME = "enableExtendedTracing"; + public static final String ENABLE_API_TRACING_PROPERTY_NAME = "enableApiTracing"; + private static final String GUARDED_CONNECTION_PROPERTY_ERROR_MESSAGE = "%s can only be used if the system property %s has been set to true. " + "Start the application with the JVM command line option -D%s=true"; @@ -288,7 +346,17 @@ private static String generateGuardedConnectionPropertyError( systemPropertyName); } - /** All valid connection properties. */ + static boolean isEnableTransactionalConnectionStateForPostgreSQL() { + return Boolean.parseBoolean( + System.getProperty(ENABLE_TRANSACTIONAL_CONNECTION_STATE_FOR_POSTGRESQL_PROPERTY, "false")); + } + + /** + * All valid connection properties. 
+ * + * @deprecated Replaced by {@link ConnectionProperties#CONNECTION_PROPERTIES} + */ + @Deprecated public static final Set VALID_PROPERTIES = Collections.unmodifiableSet( new HashSet<>( @@ -361,7 +429,8 @@ private static String generateGuardedConnectionPropertyError( "Sets the default query optimizer version to use for this connection."), ConnectionProperty.createStringProperty( OPTIMIZER_STATISTICS_PACKAGE_PROPERTY_NAME, ""), - ConnectionProperty.createBooleanProperty("returnCommitStats", "", false), + ConnectionProperty.createBooleanProperty( + "returnCommitStats", "", DEFAULT_RETURN_COMMIT_STATS), ConnectionProperty.createStringProperty( "maxCommitDelay", "The maximum commit delay in milliseconds that should be applied to commit requests from this connection."), @@ -371,6 +440,10 @@ private static String generateGuardedConnectionPropertyError( + "The instance and database in the connection string will automatically be created if these do not yet exist on the emulator. " + "Add dialect=postgresql to the connection string to make sure that the database that is created uses the PostgreSQL dialect.", false), + ConnectionProperty.createBooleanProperty( + "useAutoSavepointsForEmulator", + "Automatically creates savepoints for each statement in a read/write transaction when using the Emulator. This is no longer needed when using Emulator version 1.5.23 or higher.", + false), ConnectionProperty.createBooleanProperty( LENIENT_PROPERTY_NAME, "Silently ignore unknown properties in the connection string/properties (true/false)", @@ -397,6 +470,12 @@ private static String generateGuardedConnectionPropertyError( + "the first write operation in a read/write transaction will be executed using the read/write transaction. 
Enabling this mode can reduce locking " + "and improve performance for applications that can handle the lower transaction isolation semantics.", DEFAULT_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE), + ConnectionProperty.createBooleanProperty( + KEEP_TRANSACTION_ALIVE_PROPERTY_NAME, + "Enabling this option will trigger the connection to keep read/write transactions alive by executing a SELECT 1 query once every 10 seconds " + + "if no other statements are being executed. This option should be used with caution, as it can keep transactions alive and hold on to locks " + + "longer than intended. This option should typically be used for CLI-type application that might wait for user input for a longer period of time.", + DEFAULT_KEEP_TRANSACTION_ALIVE), ConnectionProperty.createBooleanProperty( TRACK_SESSION_LEAKS_PROPERTY_NAME, "Capture the call stack of the thread that checked out a session of the session pool. This will " @@ -436,7 +515,20 @@ private static String generateGuardedConnectionPropertyError( "The maximum number of partitions that will be executed in parallel " + "for partitioned queries on this connection. Set this value to 0 to " + "dynamically use the number of processors available in the runtime.", - DEFAULT_MAX_PARTITIONED_PARALLELISM)))); + DEFAULT_MAX_PARTITIONED_PARALLELISM), + ConnectionProperty.createBooleanProperty( + ENABLE_EXTENDED_TRACING_PROPERTY_NAME, + "Include the SQL string in the OpenTelemetry traces that are generated " + + "by this connection. The SQL string is added as the standard OpenTelemetry " + + "attribute 'db.statement'.", + DEFAULT_ENABLE_EXTENDED_TRACING), + ConnectionProperty.createBooleanProperty( + ENABLE_API_TRACING_PROPERTY_NAME, + "Add OpenTelemetry traces for each individual RPC call. 
Enable this " + + "to get a detailed view of each RPC that is being executed by your application, " + + "or if you want to debug potential latency problems caused by RPCs that are " + + "being retried.", + DEFAULT_ENABLE_API_TRACING)))); private static final Set INTERNAL_PROPERTIES = Collections.unmodifiableSet( @@ -499,9 +591,9 @@ public interface ExternalChannelProvider { /** Builder for {@link ConnectionOptions} instances. */ public static class Builder { + private final Map> connectionPropertyValues = + new HashMap<>(); private String uri; - private String credentialsUrl; - private String oauthToken; private Credentials credentials; private SessionPoolOptions sessionPoolOptions; private List statementExecutionInterceptors = @@ -590,11 +682,20 @@ public Builder setUri(String uri) { Preconditions.checkArgument( isValidUri(uri), "The specified URI is not a valid Cloud Spanner connection URI. Please specify a URI in the format \"cloudspanner:[//host[:port]]/projects/project-id[/instances/instance-id[/databases/database-name]][\\?property-name=property-value[;property-name=property-value]*]?\""); - checkValidProperties(uri); + ConnectionPropertyValue value = + cast(ConnectionProperties.parseValues(uri).get(LENIENT.getKey())); + checkValidProperties(value != null && value.getValue(), uri); this.uri = uri; return this; } + Builder setConnectionPropertyValue( + com.google.cloud.spanner.connection.ConnectionProperty property, T value) { + this.connectionPropertyValues.put( + property.getKey(), new ConnectionPropertyValue<>(property, value, value)); + return this; + } + /** Sets the {@link SessionPoolOptions} to use for the connection. 
*/ public Builder setSessionPoolOptions(SessionPoolOptions sessionPoolOptions) { Preconditions.checkNotNull(sessionPoolOptions); @@ -619,7 +720,7 @@ public Builder setSessionPoolOptions(SessionPoolOptions sessionPoolOptions) { * @return this builder */ public Builder setCredentialsUrl(String credentialsUrl) { - this.credentialsUrl = credentialsUrl; + setConnectionPropertyValue(CREDENTIALS_URL, credentialsUrl); return this; } @@ -635,7 +736,7 @@ public Builder setCredentialsUrl(String credentialsUrl) { * @return this builder */ public Builder setOAuthToken(String oauthToken) { - this.oauthToken = oauthToken; + setConnectionPropertyValue(OAUTH_TOKEN, oauthToken); return this; } @@ -662,6 +763,11 @@ public Builder setOpenTelemetry(OpenTelemetry openTelemetry) { return this; } + public Builder setTracingPrefix(String tracingPrefix) { + setConnectionPropertyValue(TRACING_PREFIX, tracingPrefix); + return this; + } + /** @return the {@link ConnectionOptions} */ public ConnectionOptions build() { Preconditions.checkState(this.uri != null, "Connection URI is required"); @@ -679,49 +785,18 @@ public static Builder newBuilder() { return new Builder(); } + private final ConnectionState initialConnectionState; private final String uri; private final String warnings; - private final String credentialsUrl; - private final String encodedCredentials; - private final CredentialsProvider credentialsProvider; - private final String oauthToken; private final Credentials fixedCredentials; - private final boolean usePlainText; private final String host; private final String projectId; private final String instanceId; private final String databaseName; private final Credentials credentials; private final SessionPoolOptions sessionPoolOptions; - private final Integer numChannels; - private final String channelProvider; - private final Integer minSessions; - private final Integer maxSessions; - private final String databaseRole; - private final String userAgent; - private final QueryOptions 
queryOptions; - private final boolean returnCommitStats; - private final Long maxCommitDelay; - private final boolean autoConfigEmulator; - private final Dialect dialect; - private final RpcPriority rpcPriority; - private final DdlInTransactionMode ddlInTransactionMode; - private final boolean delayTransactionStartUntilFirstWrite; - private final boolean trackSessionLeaks; - private final boolean trackConnectionLeaks; - - private final boolean dataBoostEnabled; - private final boolean autoPartitionMode; - private final int maxPartitions; - private final int maxPartitionedParallelism; - - private final boolean autocommit; - private final boolean readOnly; - private final boolean routeToLeader; - private final boolean retryAbortsInternally; - private final boolean useVirtualThreads; - private final boolean useVirtualGrpcTransportThreads; + private final OpenTelemetry openTelemetry; private final List statementExecutionInterceptors; private final SpannerOptionsConfigurator configurator; @@ -730,70 +805,79 @@ private ConnectionOptions(Builder builder) { Matcher matcher = Builder.SPANNER_URI_PATTERN.matcher(builder.uri); Preconditions.checkArgument( matcher.find(), String.format("Invalid connection URI specified: %s", builder.uri)); - this.warnings = checkValidProperties(builder.uri); + ImmutableMap> connectionPropertyValues = + ImmutableMap.>builder() + .putAll(ConnectionProperties.parseValues(builder.uri)) + .putAll(builder.connectionPropertyValues) + .buildKeepingLast(); this.uri = builder.uri; - this.credentialsUrl = - builder.credentialsUrl != null ? builder.credentialsUrl : parseCredentials(builder.uri); - this.encodedCredentials = parseEncodedCredentials(builder.uri); - this.credentialsProvider = parseCredentialsProvider(builder.uri); - this.oauthToken = - builder.oauthToken != null ? 
builder.oauthToken : parseOAuthToken(builder.uri); + ConnectionPropertyValue value = cast(connectionPropertyValues.get(LENIENT.getKey())); + this.warnings = checkValidProperties(value != null && value.getValue(), uri); + this.fixedCredentials = builder.credentials; + + this.openTelemetry = builder.openTelemetry; + this.statementExecutionInterceptors = + Collections.unmodifiableList(builder.statementExecutionInterceptors); + this.configurator = builder.configurator; + + // Create the initial connection state from the parsed properties in the connection URL. + this.initialConnectionState = new ConnectionState(connectionPropertyValues); + // Check that at most one of credentials location, encoded credentials, credentials provider and // OUAuth token has been specified in the connection URI. Preconditions.checkArgument( Stream.of( - this.credentialsUrl, - this.encodedCredentials, - this.credentialsProvider, - this.oauthToken) + getInitialConnectionPropertyValue(CREDENTIALS_URL), + getInitialConnectionPropertyValue(ENCODED_CREDENTIALS), + getInitialConnectionPropertyValue(CREDENTIALS_PROVIDER), + getInitialConnectionPropertyValue(OAUTH_TOKEN)) .filter(Objects::nonNull) .count() <= 1, "Specify only one of credentialsUrl, encodedCredentials, credentialsProvider and OAuth token"); - this.fixedCredentials = builder.credentials; + checkGuardedProperty( + getInitialConnectionPropertyValue(ENCODED_CREDENTIALS), + ENABLE_ENCODED_CREDENTIALS_SYSTEM_PROPERTY, + ENCODED_CREDENTIALS_PROPERTY_NAME); + checkGuardedProperty( + getInitialConnectionPropertyValue(CREDENTIALS_PROVIDER) == null + ? 
null + : getInitialConnectionPropertyValue(CREDENTIALS_PROVIDER).getClass().getName(), + ENABLE_CREDENTIALS_PROVIDER_SYSTEM_PROPERTY, + CREDENTIALS_PROVIDER_PROPERTY_NAME); + checkGuardedProperty( + getInitialConnectionPropertyValue(CHANNEL_PROVIDER), + ENABLE_CHANNEL_PROVIDER_SYSTEM_PROPERTY, + CHANNEL_PROVIDER_PROPERTY_NAME); - this.userAgent = parseUserAgent(this.uri); - QueryOptions.Builder queryOptionsBuilder = QueryOptions.newBuilder(); - queryOptionsBuilder.setOptimizerVersion(parseOptimizerVersion(this.uri)); - queryOptionsBuilder.setOptimizerStatisticsPackage(parseOptimizerStatisticsPackage(this.uri)); - this.queryOptions = queryOptionsBuilder.build(); - this.returnCommitStats = parseReturnCommitStats(this.uri); - this.maxCommitDelay = parseMaxCommitDelay(this.uri); - this.autoConfigEmulator = parseAutoConfigEmulator(this.uri); - this.dialect = parseDialect(this.uri); - this.usePlainText = this.autoConfigEmulator || parseUsePlainText(this.uri); + boolean usePlainText = + getInitialConnectionPropertyValue(AUTO_CONFIG_EMULATOR) + || getInitialConnectionPropertyValue(USE_PLAIN_TEXT); this.host = determineHost( - matcher, parseEndpoint(this.uri), autoConfigEmulator, usePlainText, System.getenv()); - this.rpcPriority = parseRPCPriority(this.uri); - this.ddlInTransactionMode = parseDdlInTransactionMode(this.uri); - this.delayTransactionStartUntilFirstWrite = parseDelayTransactionStartUntilFirstWrite(this.uri); - this.trackSessionLeaks = parseTrackSessionLeaks(this.uri); - this.trackConnectionLeaks = parseTrackConnectionLeaks(this.uri); - - this.dataBoostEnabled = parseDataBoostEnabled(this.uri); - this.autoPartitionMode = parseAutoPartitionMode(this.uri); - this.maxPartitions = parseMaxPartitions(this.uri); - this.maxPartitionedParallelism = parseMaxPartitionedParallelism(this.uri); - - this.instanceId = matcher.group(Builder.INSTANCE_GROUP); - this.databaseName = matcher.group(Builder.DATABASE_GROUP); + matcher, + getInitialConnectionPropertyValue(ENDPOINT), + 
getInitialConnectionPropertyValue(AUTO_CONFIG_EMULATOR), + usePlainText, + System.getenv()); // Using credentials on a plain text connection is not allowed, so if the user has not specified // any credentials and is using a plain text connection, we should not try to get the // credentials from the environment, but default to NoCredentials. if (this.fixedCredentials == null - && this.credentialsUrl == null - && this.encodedCredentials == null - && this.credentialsProvider == null - && this.oauthToken == null - && this.usePlainText) { + && getInitialConnectionPropertyValue(CREDENTIALS_URL) == null + && getInitialConnectionPropertyValue(ENCODED_CREDENTIALS) == null + && getInitialConnectionPropertyValue(CREDENTIALS_PROVIDER) == null + && getInitialConnectionPropertyValue(OAUTH_TOKEN) == null + && usePlainText) { this.credentials = NoCredentials.getInstance(); - } else if (this.oauthToken != null) { - this.credentials = new GoogleCredentials(new AccessToken(oauthToken, null)); - } else if (this.credentialsProvider != null) { + } else if (getInitialConnectionPropertyValue(OAUTH_TOKEN) != null) { + this.credentials = + new GoogleCredentials( + new AccessToken(getInitialConnectionPropertyValue(OAUTH_TOKEN), null)); + } else if (getInitialConnectionPropertyValue(CREDENTIALS_PROVIDER) != null) { try { - this.credentials = this.credentialsProvider.getCredentials(); + this.credentials = getInitialConnectionPropertyValue(CREDENTIALS_PROVIDER).getCredentials(); } catch (IOException exception) { throw SpannerExceptionFactory.newSpannerException( ErrorCode.INVALID_ARGUMENT, @@ -802,49 +886,31 @@ private ConnectionOptions(Builder builder) { } } else if (this.fixedCredentials != null) { this.credentials = fixedCredentials; - } else if (this.encodedCredentials != null) { - this.credentials = getCredentialsService().decodeCredentials(this.encodedCredentials); + } else if (getInitialConnectionPropertyValue(ENCODED_CREDENTIALS) != null) { + this.credentials = + getCredentialsService() 
+ .decodeCredentials(getInitialConnectionPropertyValue(ENCODED_CREDENTIALS)); } else { - this.credentials = getCredentialsService().createCredentials(this.credentialsUrl); + this.credentials = + getCredentialsService() + .createCredentials(getInitialConnectionPropertyValue(CREDENTIALS_URL)); } - this.minSessions = - parseIntegerProperty(MIN_SESSIONS_PROPERTY_NAME, parseMinSessions(builder.uri)); - this.maxSessions = - parseIntegerProperty(MAX_SESSIONS_PROPERTY_NAME, parseMaxSessions(builder.uri)); - this.numChannels = - parseIntegerProperty(NUM_CHANNELS_PROPERTY_NAME, parseNumChannels(builder.uri)); - this.channelProvider = parseChannelProvider(builder.uri); - this.databaseRole = parseDatabaseRole(this.uri); - String projectId = matcher.group(Builder.PROJECT_GROUP); - if (Builder.DEFAULT_PROJECT_ID_PLACEHOLDER.equalsIgnoreCase(projectId)) { - projectId = getDefaultProjectId(this.credentials); - } - this.projectId = projectId; - - this.autocommit = parseAutocommit(this.uri); - this.readOnly = parseReadOnly(this.uri); - this.routeToLeader = parseRouteToLeader(this.uri); - this.retryAbortsInternally = parseRetryAbortsInternally(this.uri); - this.useVirtualThreads = parseUseVirtualThreads(this.uri); - this.useVirtualGrpcTransportThreads = parseUseVirtualGrpcTransportThreads(this.uri); - this.openTelemetry = builder.openTelemetry; - this.statementExecutionInterceptors = - Collections.unmodifiableList(builder.statementExecutionInterceptors); - this.configurator = builder.configurator; - - if (this.minSessions != null || this.maxSessions != null || !this.trackSessionLeaks) { + if (getInitialConnectionPropertyValue(MIN_SESSIONS) != null + || getInitialConnectionPropertyValue(MAX_SESSIONS) != null + || !getInitialConnectionPropertyValue(TRACK_SESSION_LEAKS)) { SessionPoolOptions.Builder sessionPoolOptionsBuilder = builder.sessionPoolOptions == null ? 
SessionPoolOptions.newBuilder() : builder.sessionPoolOptions.toBuilder(); - sessionPoolOptionsBuilder.setTrackStackTraceOfSessionCheckout(this.trackSessionLeaks); + sessionPoolOptionsBuilder.setTrackStackTraceOfSessionCheckout( + getInitialConnectionPropertyValue(TRACK_SESSION_LEAKS)); sessionPoolOptionsBuilder.setAutoDetectDialect(true); - if (this.minSessions != null) { - sessionPoolOptionsBuilder.setMinSessions(this.minSessions); + if (getInitialConnectionPropertyValue(MIN_SESSIONS) != null) { + sessionPoolOptionsBuilder.setMinSessions(getInitialConnectionPropertyValue(MIN_SESSIONS)); } - if (this.maxSessions != null) { - sessionPoolOptionsBuilder.setMaxSessions(this.maxSessions); + if (getInitialConnectionPropertyValue(MAX_SESSIONS) != null) { + sessionPoolOptionsBuilder.setMaxSessions(getInitialConnectionPropertyValue(MAX_SESSIONS)); } this.sessionPoolOptions = sessionPoolOptionsBuilder.build(); } else if (builder.sessionPoolOptions != null) { @@ -852,6 +918,14 @@ private ConnectionOptions(Builder builder) { } else { this.sessionPoolOptions = SessionPoolOptions.newBuilder().setAutoDetectDialect(true).build(); } + + String projectId = matcher.group(Builder.PROJECT_GROUP); + if (Builder.DEFAULT_PROJECT_ID_PLACEHOLDER.equalsIgnoreCase(projectId)) { + projectId = getDefaultProjectId(this.credentials); + } + this.projectId = projectId; + this.instanceId = matcher.group(Builder.INSTANCE_GROUP); + this.databaseName = matcher.group(Builder.DATABASE_GROUP); } @VisibleForTesting @@ -886,20 +960,6 @@ static String determineHost( return HOST_PROTOCOL + host; } - private static Integer parseIntegerProperty(String propertyName, String value) { - if (value != null) { - try { - return Integer.valueOf(value); - } catch (NumberFormatException e) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.INVALID_ARGUMENT, - String.format("Invalid %s value specified: %s", propertyName, value), - e); - } - } - return null; - } - /** * @return an instance of OpenTelemetry. 
If OpenTelemetry object is not set then null * will be returned. @@ -917,103 +977,6 @@ CredentialsService getCredentialsService() { return CredentialsService.INSTANCE; } - @VisibleForTesting - static boolean parseUsePlainText(String uri) { - String value = parseUriProperty(uri, USE_PLAIN_TEXT_PROPERTY_NAME); - return value != null ? Boolean.parseBoolean(value) : DEFAULT_USE_PLAIN_TEXT; - } - - @VisibleForTesting - static boolean parseAutocommit(String uri) { - String value = parseUriProperty(uri, AUTOCOMMIT_PROPERTY_NAME); - return value != null ? Boolean.parseBoolean(value) : DEFAULT_AUTOCOMMIT; - } - - @VisibleForTesting - static boolean parseReadOnly(String uri) { - String value = parseUriProperty(uri, READONLY_PROPERTY_NAME); - return value != null ? Boolean.parseBoolean(value) : DEFAULT_READONLY; - } - - static boolean parseRouteToLeader(String uri) { - String value = parseUriProperty(uri, ROUTE_TO_LEADER_PROPERTY_NAME); - return value != null ? Boolean.parseBoolean(value) : DEFAULT_ROUTE_TO_LEADER; - } - - @VisibleForTesting - static boolean parseRetryAbortsInternally(String uri) { - String value = parseUriProperty(uri, RETRY_ABORTS_INTERNALLY_PROPERTY_NAME); - return value != null ? Boolean.parseBoolean(value) : DEFAULT_RETRY_ABORTS_INTERNALLY; - } - - @VisibleForTesting - static boolean parseUseVirtualThreads(String uri) { - String value = parseUriProperty(uri, USE_VIRTUAL_THREADS_PROPERTY_NAME); - return value != null ? Boolean.parseBoolean(value) : DEFAULT_USE_VIRTUAL_THREADS; - } - - @VisibleForTesting - static boolean parseUseVirtualGrpcTransportThreads(String uri) { - String value = parseUriProperty(uri, USE_VIRTUAL_GRPC_TRANSPORT_THREADS_PROPERTY_NAME); - return value != null ? Boolean.parseBoolean(value) : DEFAULT_USE_VIRTUAL_GRPC_TRANSPORT_THREADS; - } - - @VisibleForTesting - static @Nullable String parseCredentials(String uri) { - String value = parseUriProperty(uri, CREDENTIALS_PROPERTY_NAME); - return value != null ? 
value : DEFAULT_CREDENTIALS; - } - - @VisibleForTesting - static @Nullable String parseEncodedCredentials(String uri) { - String encodedCredentials = parseUriProperty(uri, ENCODED_CREDENTIALS_PROPERTY_NAME); - checkGuardedProperty( - encodedCredentials, - ENABLE_ENCODED_CREDENTIALS_SYSTEM_PROPERTY, - ENCODED_CREDENTIALS_PROPERTY_NAME); - return encodedCredentials; - } - - @VisibleForTesting - static @Nullable CredentialsProvider parseCredentialsProvider(String uri) { - String credentialsProviderName = parseUriProperty(uri, CREDENTIALS_PROVIDER_PROPERTY_NAME); - checkGuardedProperty( - credentialsProviderName, - ENABLE_CREDENTIALS_PROVIDER_SYSTEM_PROPERTY, - CREDENTIALS_PROVIDER_PROPERTY_NAME); - if (!Strings.isNullOrEmpty(credentialsProviderName)) { - try { - Class clazz = - (Class) Class.forName(credentialsProviderName); - Constructor constructor = clazz.getDeclaredConstructor(); - return constructor.newInstance(); - } catch (ClassNotFoundException classNotFoundException) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.INVALID_ARGUMENT, - "Unknown or invalid CredentialsProvider class name: " + credentialsProviderName, - classNotFoundException); - } catch (NoSuchMethodException noSuchMethodException) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.INVALID_ARGUMENT, - "Credentials provider " - + credentialsProviderName - + " does not have a public no-arg constructor.", - noSuchMethodException); - } catch (InvocationTargetException - | InstantiationException - | IllegalAccessException exception) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.INVALID_ARGUMENT, - "Failed to create an instance of " - + credentialsProviderName - + ": " - + exception.getMessage(), - exception); - } - } - return null; - } - private static void checkGuardedProperty( String value, String systemPropertyName, String connectionPropertyName) { if (!Strings.isNullOrEmpty(value) @@ -1024,196 +987,6 @@ private static void 
checkGuardedProperty( } } - @VisibleForTesting - static @Nullable String parseOAuthToken(String uri) { - String value = parseUriProperty(uri, OAUTH_TOKEN_PROPERTY_NAME); - return value != null ? value : DEFAULT_OAUTH_TOKEN; - } - - @VisibleForTesting - static String parseMinSessions(String uri) { - String value = parseUriProperty(uri, MIN_SESSIONS_PROPERTY_NAME); - return value != null ? value : DEFAULT_MIN_SESSIONS; - } - - @VisibleForTesting - static String parseMaxSessions(String uri) { - String value = parseUriProperty(uri, MAX_SESSIONS_PROPERTY_NAME); - return value != null ? value : DEFAULT_MAX_SESSIONS; - } - - @VisibleForTesting - static String parseNumChannels(String uri) { - String value = parseUriProperty(uri, NUM_CHANNELS_PROPERTY_NAME); - return value != null ? value : DEFAULT_NUM_CHANNELS; - } - - private static String parseEndpoint(String uri) { - String value = parseUriProperty(uri, ENDPOINT_PROPERTY_NAME); - return value != null ? value : DEFAULT_ENDPOINT; - } - - @VisibleForTesting - static String parseChannelProvider(String uri) { - String value = parseUriProperty(uri, CHANNEL_PROVIDER_PROPERTY_NAME); - checkGuardedProperty( - value, ENABLE_CHANNEL_PROVIDER_SYSTEM_PROPERTY, CHANNEL_PROVIDER_PROPERTY_NAME); - return value != null ? value : DEFAULT_CHANNEL_PROVIDER; - } - - @VisibleForTesting - static String parseDatabaseRole(String uri) { - String value = parseUriProperty(uri, DATABASE_ROLE_PROPERTY_NAME); - return value != null ? value : DEFAULT_DATABASE_ROLE; - } - - @VisibleForTesting - static String parseUserAgent(String uri) { - String value = parseUriProperty(uri, USER_AGENT_PROPERTY_NAME); - return value != null ? value : DEFAULT_USER_AGENT; - } - - @VisibleForTesting - static String parseOptimizerVersion(String uri) { - String value = parseUriProperty(uri, OPTIMIZER_VERSION_PROPERTY_NAME); - return value != null ? 
value : DEFAULT_OPTIMIZER_VERSION; - } - - @VisibleForTesting - static String parseOptimizerStatisticsPackage(String uri) { - String value = parseUriProperty(uri, OPTIMIZER_STATISTICS_PACKAGE_PROPERTY_NAME); - return value != null ? value : DEFAULT_OPTIMIZER_STATISTICS_PACKAGE; - } - - @VisibleForTesting - static boolean parseReturnCommitStats(String uri) { - String value = parseUriProperty(uri, "returnCommitStats"); - return Boolean.parseBoolean(value); - } - - @VisibleForTesting - static Long parseMaxCommitDelay(String uri) { - String value = parseUriProperty(uri, "maxCommitDelay"); - try { - Long millis = value == null ? null : Long.valueOf(value); - if (millis != null && millis < 0L) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.INVALID_ARGUMENT, "maxCommitDelay must be >=0"); - } - return millis; - } catch (NumberFormatException numberFormatException) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.INVALID_ARGUMENT, - "Invalid value for maxCommitDelay: " - + value - + "\n" - + "The value must be a positive integer indicating the number of " - + "milliseconds to use as the max delay."); - } - } - - static boolean parseAutoConfigEmulator(String uri) { - String value = parseUriProperty(uri, "autoConfigEmulator"); - return Boolean.parseBoolean(value); - } - - @VisibleForTesting - static Dialect parseDialect(String uri) { - String value = parseUriProperty(uri, DIALECT_PROPERTY_NAME); - return value != null ? Dialect.valueOf(value.toUpperCase()) : Dialect.GOOGLE_STANDARD_SQL; - } - - @VisibleForTesting - static boolean parseLenient(String uri) { - String value = parseUriProperty(uri, LENIENT_PROPERTY_NAME); - return value != null ? Boolean.parseBoolean(value) : DEFAULT_LENIENT; - } - - @VisibleForTesting - static boolean parseDelayTransactionStartUntilFirstWrite(String uri) { - String value = parseUriProperty(uri, DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE_NAME); - return value != null - ? 
Boolean.parseBoolean(value) - : DEFAULT_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE; - } - - @VisibleForTesting - static boolean parseTrackSessionLeaks(String uri) { - String value = parseUriProperty(uri, TRACK_SESSION_LEAKS_PROPERTY_NAME); - return value != null ? Boolean.parseBoolean(value) : DEFAULT_TRACK_SESSION_LEAKS; - } - - @VisibleForTesting - static boolean parseTrackConnectionLeaks(String uri) { - String value = parseUriProperty(uri, TRACK_CONNECTION_LEAKS_PROPERTY_NAME); - return value != null ? Boolean.parseBoolean(value) : DEFAULT_TRACK_CONNECTION_LEAKS; - } - - @VisibleForTesting - static boolean parseDataBoostEnabled(String uri) { - String value = parseUriProperty(uri, DATA_BOOST_ENABLED_PROPERTY_NAME); - return value != null ? Boolean.parseBoolean(value) : DEFAULT_DATA_BOOST_ENABLED; - } - - @VisibleForTesting - static boolean parseAutoPartitionMode(String uri) { - String value = parseUriProperty(uri, AUTO_PARTITION_MODE_PROPERTY_NAME); - return value != null ? Boolean.parseBoolean(value) : DEFAULT_AUTO_PARTITION_MODE; - } - - @VisibleForTesting - static int parseMaxPartitions(String uri) { - String stringValue = parseUriProperty(uri, MAX_PARTITIONS_PROPERTY_NAME); - if (stringValue == null) { - return DEFAULT_MAX_PARTITIONS; - } - try { - int value = Integer.parseInt(stringValue); - if (value < 0) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.INVALID_ARGUMENT, "maxPartitions must be >=0"); - } - return value; - } catch (NumberFormatException numberFormatException) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.INVALID_ARGUMENT, "Invalid value for maxPartitions: " + stringValue); - } - } - - @VisibleForTesting - static int parseMaxPartitionedParallelism(String uri) { - String stringValue = parseUriProperty(uri, MAX_PARTITIONED_PARALLELISM_PROPERTY_NAME); - if (stringValue == null) { - return DEFAULT_MAX_PARTITIONED_PARALLELISM; - } - try { - int value = Integer.parseInt(stringValue); - if (value < 0) { - throw 
SpannerExceptionFactory.newSpannerException( - ErrorCode.INVALID_ARGUMENT, "maxPartitionedParallelism must be >=0"); - } - return value; - } catch (NumberFormatException numberFormatException) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.INVALID_ARGUMENT, - "Invalid value for maxPartitionedParallelism: " + stringValue); - } - } - - @VisibleForTesting - static RpcPriority parseRPCPriority(String uri) { - String value = parseUriProperty(uri, RPC_PRIORITY_NAME); - return value != null ? RpcPriority.valueOf(value) : DEFAULT_RPC_PRIORITY; - } - - @VisibleForTesting - static DdlInTransactionMode parseDdlInTransactionMode(String uri) { - String value = parseUriProperty(uri, DDL_IN_TRANSACTION_MODE_PROPERTY_NAME); - return value != null - ? DdlInTransactionMode.valueOf(value.toUpperCase()) - : DEFAULT_DDL_IN_TRANSACTION_MODE; - } - @VisibleForTesting static String parseUriProperty(String uri, String property) { Pattern pattern = Pattern.compile(String.format("(?is)(?:;|\\?)%s=(.*?)(?:;|$)", property)); @@ -1226,23 +999,23 @@ static String parseUriProperty(String uri, String property) { /** Check that only valid properties have been specified. 
*/ @VisibleForTesting - static String checkValidProperties(String uri) { - String invalidProperties = ""; + static String checkValidProperties(boolean lenient, String uri) { + StringBuilder invalidProperties = new StringBuilder(); List properties = parseProperties(uri); - boolean lenient = parseLenient(uri); for (String property : properties) { - if (!INTERNAL_VALID_PROPERTIES.contains(ConnectionProperty.createEmptyProperty(property))) { + if (!ConnectionProperties.CONNECTION_PROPERTIES.containsKey( + property.toLowerCase(Locale.ENGLISH))) { if (invalidProperties.length() > 0) { - invalidProperties = invalidProperties + ", "; + invalidProperties.append(", "); } - invalidProperties = invalidProperties + property; + invalidProperties.append(property); } } if (lenient) { return String.format("Invalid properties found in connection URI: %s", invalidProperties); } else { Preconditions.checkArgument( - invalidProperties.isEmpty(), + invalidProperties.length() == 0, String.format( "Invalid properties found in connection URI. Add lenient=true to the connection string to ignore unknown properties. Invalid properties: %s", invalidProperties)); @@ -1261,6 +1034,14 @@ static List parseProperties(String uri) { return res; } + static long tryParseLong(String value, long defaultValue) { + try { + return Long.parseLong(value); + } catch (NumberFormatException ignore) { + return defaultValue; + } + } + /** * Create a new {@link Connection} from this {@link ConnectionOptions}. Calling this method * multiple times for the same {@link ConnectionOptions} will return multiple instances of {@link @@ -1278,13 +1059,23 @@ public String getUri() { return uri; } + /** The connection properties that have been pre-set for this {@link ConnectionOptions}. 
*/ + Map> getInitialConnectionPropertyValues() { + return this.initialConnectionState.getAllValues(); + } + + T getInitialConnectionPropertyValue( + com.google.cloud.spanner.connection.ConnectionProperty property) { + return this.initialConnectionState.getValue(property).getValue(); + } + /** The credentials URL of this {@link ConnectionOptions} */ public String getCredentialsUrl() { - return credentialsUrl; + return getInitialConnectionPropertyValue(CREDENTIALS_URL); } String getOAuthToken() { - return this.oauthToken; + return getInitialConnectionPropertyValue(OAUTH_TOKEN); } Credentials getFixedCredentials() { @@ -1292,7 +1083,7 @@ Credentials getFixedCredentials() { } CredentialsProvider getCredentialsProvider() { - return this.credentialsProvider; + return getInitialConnectionPropertyValue(CREDENTIALS_PROVIDER); } /** The {@link SessionPoolOptions} of this {@link ConnectionOptions}. */ @@ -1306,7 +1097,7 @@ public SessionPoolOptions getSessionPoolOptions() { * database using the same connection settings. */ public Integer getMinSessions() { - return minSessions; + return getInitialConnectionPropertyValue(MIN_SESSIONS); } /** @@ -1315,16 +1106,17 @@ public Integer getMinSessions() { * database using the same connection settings. */ public Integer getMaxSessions() { - return maxSessions; + return getInitialConnectionPropertyValue(MAX_SESSIONS); } /** The number of channels to use for the connection. */ public Integer getNumChannels() { - return numChannels; + return getInitialConnectionPropertyValue(NUM_CHANNELS); } /** Calls the getChannelProvider() method from the supplied class. */ public TransportChannelProvider getChannelProvider() { + String channelProvider = getInitialConnectionPropertyValue(CHANNEL_PROVIDER); if (channelProvider == null) { return null; } @@ -1347,7 +1139,7 @@ public TransportChannelProvider getChannelProvider() { * used to for example restrict the access of a connection to a specific set of tables. 
*/ public String getDatabaseRole() { - return databaseRole; + return getInitialConnectionPropertyValue(DATABASE_ROLE); } /** The host and port number that this {@link ConnectionOptions} will connect to */ @@ -1388,12 +1180,12 @@ public Credentials getCredentials() { /** The initial autocommit value for connections created by this {@link ConnectionOptions} */ public boolean isAutocommit() { - return autocommit; + return getInitialConnectionPropertyValue(AUTOCOMMIT); } /** The initial readonly value for connections created by this {@link ConnectionOptions} */ public boolean isReadOnly() { - return readOnly; + return getInitialConnectionPropertyValue(READONLY); } /** @@ -1401,7 +1193,7 @@ public boolean isReadOnly() { * region. */ public boolean isRouteToLeader() { - return routeToLeader; + return getInitialConnectionPropertyValue(ROUTE_TO_LEADER); } /** @@ -1409,17 +1201,17 @@ public boolean isRouteToLeader() { * ConnectionOptions} */ public boolean isRetryAbortsInternally() { - return retryAbortsInternally; + return getInitialConnectionPropertyValue(RETRY_ABORTS_INTERNALLY); } /** Whether connections should use virtual threads for connection executors. */ public boolean isUseVirtualThreads() { - return useVirtualThreads; + return getInitialConnectionPropertyValue(USE_VIRTUAL_THREADS); } /** Whether virtual threads should be used for gRPC transport. */ public boolean isUseVirtualGrpcTransportThreads() { - return useVirtualGrpcTransportThreads; + return getInitialConnectionPropertyValue(USE_VIRTUAL_GRPC_TRANSPORT_THREADS); } /** Any warnings that were generated while creating the {@link ConnectionOptions} instance. */ @@ -1430,7 +1222,8 @@ public String getWarnings() { /** Use http instead of https. Only valid for (local) test servers. 
*/ boolean isUsePlainText() { - return usePlainText; + return getInitialConnectionPropertyValue(AUTO_CONFIG_EMULATOR) + || getInitialConnectionPropertyValue(USE_PLAIN_TEXT); } /** @@ -1438,22 +1231,25 @@ boolean isUsePlainText() { * default JDBC user agent string will be used. */ String getUserAgent() { - return userAgent; - } - - /** The {@link QueryOptions} to use for the connection. */ - QueryOptions getQueryOptions() { - return queryOptions; + return getInitialConnectionPropertyValue(USER_AGENT); } /** Whether connections created by this {@link ConnectionOptions} return commit stats. */ public boolean isReturnCommitStats() { - return returnCommitStats; + return getInitialConnectionPropertyValue(RETURN_COMMIT_STATS); } /** The max_commit_delay that should be applied to commit operations on this connection. */ public Duration getMaxCommitDelay() { - return maxCommitDelay == null ? null : Duration.ofMillis(maxCommitDelay); + return getInitialConnectionPropertyValue(MAX_COMMIT_DELAY); + } + + boolean usesEmulator() { + return Suppliers.memoize( + () -> + isAutoConfigEmulator() + || !Strings.isNullOrEmpty(System.getenv("SPANNER_EMULATOR_HOST"))) + .get(); } /** @@ -1463,58 +1259,49 @@ public Duration getMaxCommitDelay() { * emulator instance. */ public boolean isAutoConfigEmulator() { - return autoConfigEmulator; + return getInitialConnectionPropertyValue(AUTO_CONFIG_EMULATOR); } /** * Returns true if a connection should generate auto-savepoints for retrying transactions on the * emulator. This allows some more concurrent transactions on the emulator. + * + *

    This is no longer needed since version 1.5.23 of the emulator. */ boolean useAutoSavepointsForEmulator() { - // For now, this option is directly linked to the option autoConfigEmulator=true, which is the - // recommended way to configure the emulator for the Connection API. - return autoConfigEmulator; + return getInitialConnectionPropertyValue(USE_AUTO_SAVEPOINTS_FOR_EMULATOR); } public Dialect getDialect() { - return dialect; - } - - /** The {@link RpcPriority} to use for the connection. */ - RpcPriority getRPCPriority() { - return rpcPriority; - } - - DdlInTransactionMode getDdlInTransactionMode() { - return this.ddlInTransactionMode; - } - - /** - * Whether connections created by this {@link ConnectionOptions} should delay the actual start of - * a read/write transaction until the first write operation. - */ - boolean isDelayTransactionStartUntilFirstWrite() { - return delayTransactionStartUntilFirstWrite; + return getInitialConnectionPropertyValue(DIALECT); } boolean isTrackConnectionLeaks() { - return this.trackConnectionLeaks; + return getInitialConnectionPropertyValue(TRACK_CONNECTION_LEAKS); } boolean isDataBoostEnabled() { - return this.dataBoostEnabled; + return getInitialConnectionPropertyValue(DATA_BOOST_ENABLED); } boolean isAutoPartitionMode() { - return this.autoPartitionMode; + return getInitialConnectionPropertyValue(AUTO_PARTITION_MODE); } int getMaxPartitions() { - return this.maxPartitions; + return getInitialConnectionPropertyValue(MAX_PARTITIONS); } int getMaxPartitionedParallelism() { - return this.maxPartitionedParallelism; + return getInitialConnectionPropertyValue(MAX_PARTITIONED_PARALLELISM); + } + + Boolean isEnableExtendedTracing() { + return getInitialConnectionPropertyValue(ENABLE_EXTENDED_TRACING); + } + + Boolean isEnableApiTracing() { + return getInitialConnectionPropertyValue(ENABLE_API_TRACING); } /** Interceptors that should be executed after each statement */ diff --git 
a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionProperties.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionProperties.java new file mode 100644 index 00000000000..b18326e015d --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionProperties.java @@ -0,0 +1,515 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionOptions.AUTOCOMMIT_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.AUTO_PARTITION_MODE_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.CHANNEL_PROVIDER_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.CREDENTIALS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.CREDENTIALS_PROVIDER_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.DATABASE_ROLE_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.DATA_BOOST_ENABLED_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.DDL_IN_TRANSACTION_MODE_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_AUTOCOMMIT; +import static 
com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_AUTO_PARTITION_MODE; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_CHANNEL_PROVIDER; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_CREDENTIALS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_DATABASE_ROLE; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_DATA_BOOST_ENABLED; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_DDL_IN_TRANSACTION_MODE; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_ENABLE_API_TRACING; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_ENABLE_EXTENDED_TRACING; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_ENDPOINT; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_KEEP_TRANSACTION_ALIVE; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_LENIENT; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_MAX_PARTITIONED_PARALLELISM; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_MAX_PARTITIONS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_MAX_SESSIONS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_MIN_SESSIONS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_NUM_CHANNELS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_OAUTH_TOKEN; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_OPTIMIZER_STATISTICS_PACKAGE; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_OPTIMIZER_VERSION; +import static 
com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_READONLY; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_RETRY_ABORTS_INTERNALLY; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_RETURN_COMMIT_STATS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_ROUTE_TO_LEADER; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_RPC_PRIORITY; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_TRACK_CONNECTION_LEAKS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_TRACK_SESSION_LEAKS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_USER_AGENT; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_USE_PLAIN_TEXT; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_USE_VIRTUAL_GRPC_TRANSPORT_THREADS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DEFAULT_USE_VIRTUAL_THREADS; +import static com.google.cloud.spanner.connection.ConnectionOptions.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.DIALECT_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.ENABLE_API_TRACING_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.ENABLE_EXTENDED_TRACING_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.ENCODED_CREDENTIALS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.ENDPOINT_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.KEEP_TRANSACTION_ALIVE_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.LENIENT_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.MAX_PARTITIONED_PARALLELISM_PROPERTY_NAME; +import static 
com.google.cloud.spanner.connection.ConnectionOptions.MAX_PARTITIONS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.MAX_SESSIONS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.MIN_SESSIONS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.NUM_CHANNELS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.OAUTH_TOKEN_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.OPTIMIZER_STATISTICS_PACKAGE_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.OPTIMIZER_VERSION_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.READONLY_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.RETRY_ABORTS_INTERNALLY_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.ROUTE_TO_LEADER_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.RPC_PRIORITY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.TRACK_CONNECTION_LEAKS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.TRACK_SESSION_LEAKS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.USER_AGENT_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.USE_PLAIN_TEXT_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.USE_VIRTUAL_GRPC_TRANSPORT_THREADS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionOptions.USE_VIRTUAL_THREADS_PROPERTY_NAME; +import static com.google.cloud.spanner.connection.ConnectionProperty.castProperty; + +import com.google.api.gax.core.CredentialsProvider; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Options.RpcPriority; +import com.google.cloud.spanner.TimestampBound; +import 
com.google.cloud.spanner.connection.ClientSideStatementValueConverters.AutocommitDmlModeConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.BooleanConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.ConnectionStateTypeConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.CredentialsProviderConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.DdlInTransactionModeConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.DialectConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.DurationConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.NonNegativeIntegerConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.ReadOnlyStalenessConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.RpcPriorityConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.SavepointSupportConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.StringValueConverter; +import com.google.cloud.spanner.connection.ConnectionProperty.Context; +import com.google.cloud.spanner.connection.DirectedReadOptionsUtil.DirectedReadOptionsConverter; +import com.google.common.collect.ImmutableMap; +import com.google.spanner.v1.DirectedReadOptions; +import java.time.Duration; +import java.util.Map; + +/** + * Utility class that defines all known connection properties. This class will eventually replace + * the list of {@link com.google.cloud.spanner.connection.ConnectionOptions.ConnectionProperty} in + * {@link ConnectionOptions}. 
+ */ +class ConnectionProperties { + private static final ImmutableMap.Builder> + CONNECTION_PROPERTIES_BUILDER = ImmutableMap.builder(); + + static final ConnectionProperty CONNECTION_STATE_TYPE = + create( + "connection_state_type", + "The type of connection state to use for this connection. Can only be set at start up. " + + "If no value is set, then the database dialect default will be used, " + + "which is NON_TRANSACTIONAL for GoogleSQL and TRANSACTIONAL for PostgreSQL.", + null, + ConnectionStateTypeConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty TRACING_PREFIX = + create( + "tracing_prefix", + "The prefix that will be prepended to all OpenTelemetry traces that are " + + "generated by a Connection.", + "CloudSpanner", + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty LENIENT = + create( + LENIENT_PROPERTY_NAME, + "Silently ignore unknown properties in the connection string/properties (true/false)", + DEFAULT_LENIENT, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty ENDPOINT = + create( + ENDPOINT_PROPERTY_NAME, + "The endpoint that the JDBC driver should connect to. " + + "The default is the default Spanner production endpoint when autoConfigEmulator=false, " + + "and the default Spanner emulator endpoint (localhost:9010) when autoConfigEmulator=true. " + + "This property takes precedence over any host name at the start of the connection URL.", + DEFAULT_ENDPOINT, + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty AUTO_CONFIG_EMULATOR = + create( + "autoConfigEmulator", + "Automatically configure the connection to try to connect to the Cloud Spanner emulator (true/false). " + + "The instance and database in the connection string will automatically be created if these do not yet exist on the emulator. 
" + + "Add dialect=postgresql to the connection string to make sure that the database that is created uses the PostgreSQL dialect.", + false, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty USE_AUTO_SAVEPOINTS_FOR_EMULATOR = + create( + "useAutoSavepointsForEmulator", + "Automatically creates savepoints for each statement in a read/write transaction when using the Emulator. " + + "This is no longer needed when using Emulator version 1.5.23 or higher.", + false, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty USE_PLAIN_TEXT = + create( + USE_PLAIN_TEXT_PROPERTY_NAME, + "Use a plain text communication channel (i.e. non-TLS) for communicating with the server (true/false). Set this value to true for communication with the Cloud Spanner emulator.", + DEFAULT_USE_PLAIN_TEXT, + BooleanConverter.INSTANCE, + Context.STARTUP); + + static final ConnectionProperty CREDENTIALS_URL = + create( + CREDENTIALS_PROPERTY_NAME, + "The location of the credentials file to use for this connection. If neither this property or encoded credentials are set, the connection will use the default Google Cloud credentials for the runtime environment.", + DEFAULT_CREDENTIALS, + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty ENCODED_CREDENTIALS = + create( + ENCODED_CREDENTIALS_PROPERTY_NAME, + "Base64-encoded credentials to use for this connection. If neither this property or a credentials location are set, the connection will use the default Google Cloud credentials for the runtime environment.", + null, + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty OAUTH_TOKEN = + create( + OAUTH_TOKEN_PROPERTY_NAME, + "A valid pre-existing OAuth token to use for authentication for this connection. 
Setting this property will take precedence over any value set for a credentials file.", + DEFAULT_OAUTH_TOKEN, + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty CREDENTIALS_PROVIDER = + create( + CREDENTIALS_PROVIDER_PROPERTY_NAME, + "The class name of the com.google.api.gax.core.CredentialsProvider implementation that should be used to obtain credentials for connections.", + null, + CredentialsProviderConverter.INSTANCE, + Context.STARTUP); + + static final ConnectionProperty USER_AGENT = + create( + USER_AGENT_PROPERTY_NAME, + "The custom user-agent property name to use when communicating with Cloud Spanner. This property is intended for internal library usage, and should not be set by applications.", + DEFAULT_USER_AGENT, + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty DIALECT = + create( + DIALECT_PROPERTY_NAME, + "Sets the dialect to use for new databases that are created by this connection.", + Dialect.GOOGLE_STANDARD_SQL, + DialectConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty TRACK_SESSION_LEAKS = + create( + TRACK_SESSION_LEAKS_PROPERTY_NAME, + "Capture the call stack of the thread that checked out a session of the session pool. This will " + + "pre-create a LeakedSessionException already when a session is checked out. This can be disabled, " + + "for example if a monitoring system logs the pre-created exception. " + + "If disabled, the LeakedSessionException will only be created when an " + + "actual session leak is detected. The stack trace of the exception will " + + "in that case not contain the call stack of when the session was checked out.", + DEFAULT_TRACK_SESSION_LEAKS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty TRACK_CONNECTION_LEAKS = + create( + TRACK_CONNECTION_LEAKS_PROPERTY_NAME, + "Capture the call stack of the thread that created a connection. 
This will " + + "pre-create a LeakedConnectionException already when a connection is created. " + + "This can be disabled, for example if a monitoring system logs the pre-created exception. " + + "If disabled, the LeakedConnectionException will only be created when an " + + "actual connection leak is detected. The stack trace of the exception will " + + "in that case not contain the call stack of when the connection was created.", + DEFAULT_TRACK_CONNECTION_LEAKS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty ROUTE_TO_LEADER = + create( + ROUTE_TO_LEADER_PROPERTY_NAME, + "Should read/write transactions and partitioned DML be routed to leader region (true/false)", + DEFAULT_ROUTE_TO_LEADER, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty USE_VIRTUAL_THREADS = + create( + USE_VIRTUAL_THREADS_PROPERTY_NAME, + "Use a virtual thread instead of a platform thread for each connection (true/false). " + + "This option only has any effect if the application is running on Java 21 or higher. In all other cases, the option is ignored.", + DEFAULT_USE_VIRTUAL_THREADS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty USE_VIRTUAL_GRPC_TRANSPORT_THREADS = + create( + USE_VIRTUAL_GRPC_TRANSPORT_THREADS_PROPERTY_NAME, + "Use a virtual thread instead of a platform thread for the gRPC executor (true/false). " + + "This option only has any effect if the application is running on Java 21 or higher. In all other cases, the option is ignored.", + DEFAULT_USE_VIRTUAL_GRPC_TRANSPORT_THREADS, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty ENABLE_EXTENDED_TRACING = + create( + ENABLE_EXTENDED_TRACING_PROPERTY_NAME, + "Include the SQL string in the OpenTelemetry traces that are generated " + + "by this connection. 
The SQL string is added as the standard OpenTelemetry " + + "attribute 'db.statement'.", + DEFAULT_ENABLE_EXTENDED_TRACING, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty ENABLE_API_TRACING = + create( + ENABLE_API_TRACING_PROPERTY_NAME, + "Add OpenTelemetry traces for each individual RPC call. Enable this " + + "to get a detailed view of each RPC that is being executed by your application, " + + "or if you want to debug potential latency problems caused by RPCs that are " + + "being retried.", + DEFAULT_ENABLE_API_TRACING, + BooleanConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty MIN_SESSIONS = + create( + MIN_SESSIONS_PROPERTY_NAME, + "The minimum number of sessions in the backing session pool. The default is 100.", + DEFAULT_MIN_SESSIONS, + NonNegativeIntegerConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty MAX_SESSIONS = + create( + MAX_SESSIONS_PROPERTY_NAME, + "The maximum number of sessions in the backing session pool. The default is 400.", + DEFAULT_MAX_SESSIONS, + NonNegativeIntegerConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty NUM_CHANNELS = + create( + NUM_CHANNELS_PROPERTY_NAME, + "The number of gRPC channels to use to communicate with Cloud Spanner. The default is 4.", + DEFAULT_NUM_CHANNELS, + NonNegativeIntegerConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty CHANNEL_PROVIDER = + create( + CHANNEL_PROVIDER_PROPERTY_NAME, + "The name of the channel provider class. The name must reference an implementation of ExternalChannelProvider. If this property is not set, the connection will use the default grpc channel provider.", + DEFAULT_CHANNEL_PROVIDER, + StringValueConverter.INSTANCE, + Context.STARTUP); + static final ConnectionProperty DATABASE_ROLE = + create( + DATABASE_ROLE_PROPERTY_NAME, + "Sets the database role to use for this connection. 
The default is privileges assigned to IAM role", + DEFAULT_DATABASE_ROLE, + StringValueConverter.INSTANCE, + Context.STARTUP); + + static final ConnectionProperty AUTOCOMMIT = + create( + AUTOCOMMIT_PROPERTY_NAME, + "Should the connection start in autocommit (true/false)", + DEFAULT_AUTOCOMMIT, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty READONLY = + create( + READONLY_PROPERTY_NAME, + "Should the connection start in read-only mode (true/false)", + DEFAULT_READONLY, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty AUTOCOMMIT_DML_MODE = + create( + "autocommit_dml_mode", + "Should the connection automatically retry Aborted errors (true/false)", + AutocommitDmlMode.TRANSACTIONAL, + AutocommitDmlModeConverter.INSTANCE, + Context.USER); + static final ConnectionProperty RETRY_ABORTS_INTERNALLY = + create( + // TODO: Add support for synonyms for connection properties. + // retryAbortsInternally / retry_aborts_internally is currently not consistent. + // The connection URL property is retryAbortsInternally. The SET statement assumes + // that the property name is retry_aborts_internally. We should support both to be + // backwards compatible, but the standard should be snake_case. + RETRY_ABORTS_INTERNALLY_PROPERTY_NAME, + "Should the connection automatically retry Aborted errors (true/false)", + DEFAULT_RETRY_ABORTS_INTERNALLY, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty RETURN_COMMIT_STATS = + create( + "returnCommitStats", + "Request that Spanner returns commit statistics for read/write transactions (true/false)", + DEFAULT_RETURN_COMMIT_STATS, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = + create( + DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE_NAME, + "Enabling this option will delay the actual start of a read/write transaction until the first write operation is seen in that transaction. 
" + + "All reads that happen before the first write in a transaction will instead be executed as if the connection was in auto-commit mode. " + + "Enabling this option will make read/write transactions lose their SERIALIZABLE isolation level. Read operations that are executed after " + + "the first write operation in a read/write transaction will be executed using the read/write transaction. Enabling this mode can reduce locking " + + "and improve performance for applications that can handle the lower transaction isolation semantics.", + DEFAULT_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty KEEP_TRANSACTION_ALIVE = + create( + KEEP_TRANSACTION_ALIVE_PROPERTY_NAME, + "Enabling this option will trigger the connection to keep read/write transactions alive by executing a SELECT 1 query once every 10 seconds " + + "if no other statements are being executed. This option should be used with caution, as it can keep transactions alive and hold on to locks " + + "longer than intended. This option should typically be used for CLI-type application that might wait for user input for a longer period of time.", + DEFAULT_KEEP_TRANSACTION_ALIVE, + BooleanConverter.INSTANCE, + Context.USER); + + static final ConnectionProperty READ_ONLY_STALENESS = + create( + "read_only_staleness", + "The read-only staleness to use for read-only transactions and single-use queries.", + TimestampBound.strong(), + ReadOnlyStalenessConverter.INSTANCE, + Context.USER); + static final ConnectionProperty AUTO_PARTITION_MODE = + create( + AUTO_PARTITION_MODE_PROPERTY_NAME, + "Execute all queries on this connection as partitioned queries. " + + "Executing a query that cannot be partitioned will fail. 
" + + "Executing a query in a read/write transaction will also fail.", + DEFAULT_AUTO_PARTITION_MODE, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty DATA_BOOST_ENABLED = + create( + DATA_BOOST_ENABLED_PROPERTY_NAME, + "Enable data boost for all partitioned queries that are executed by this connection. " + + "This setting is only used for partitioned queries and is ignored by all other statements.", + DEFAULT_DATA_BOOST_ENABLED, + BooleanConverter.INSTANCE, + Context.USER); + static final ConnectionProperty MAX_PARTITIONS = + create( + MAX_PARTITIONS_PROPERTY_NAME, + "The max partitions hint value to use for partitioned queries. " + + "Use 0 if you do not want to specify a hint.", + DEFAULT_MAX_PARTITIONS, + NonNegativeIntegerConverter.INSTANCE, + Context.USER); + static final ConnectionProperty MAX_PARTITIONED_PARALLELISM = + create( + MAX_PARTITIONED_PARALLELISM_PROPERTY_NAME, + "The max partitions hint value to use for partitioned queries. " + + "Use 0 if you do not want to specify a hint.", + DEFAULT_MAX_PARTITIONED_PARALLELISM, + NonNegativeIntegerConverter.INSTANCE, + Context.USER); + + static final ConnectionProperty DIRECTED_READ = + create( + "directed_read", + "The directed read options to apply to read-only transactions.", + null, + DirectedReadOptionsConverter.INSTANCE, + Context.USER); + static final ConnectionProperty OPTIMIZER_VERSION = + create( + OPTIMIZER_VERSION_PROPERTY_NAME, + "Sets the default query optimizer version to use for this connection.", + DEFAULT_OPTIMIZER_VERSION, + StringValueConverter.INSTANCE, + Context.USER); + static final ConnectionProperty OPTIMIZER_STATISTICS_PACKAGE = + create( + OPTIMIZER_STATISTICS_PACKAGE_PROPERTY_NAME, + "Sets the query optimizer statistics package to use for this connection.", + DEFAULT_OPTIMIZER_STATISTICS_PACKAGE, + StringValueConverter.INSTANCE, + Context.USER); + static final ConnectionProperty RPC_PRIORITY = + create( + RPC_PRIORITY_NAME, + "Sets the priority for 
all RPC invocations from this connection (HIGH/MEDIUM/LOW). The default is HIGH.", + DEFAULT_RPC_PRIORITY, + RpcPriorityConverter.INSTANCE, + Context.USER); + static final ConnectionProperty SAVEPOINT_SUPPORT = + create( + "savepoint_support", + "Determines the behavior of the connection when savepoints are used.", + SavepointSupport.FAIL_AFTER_ROLLBACK, + SavepointSupportConverter.INSTANCE, + Context.USER); + static final ConnectionProperty DDL_IN_TRANSACTION_MODE = + create( + DDL_IN_TRANSACTION_MODE_PROPERTY_NAME, + "Determines how the connection should handle DDL statements in a read/write transaction.", + DEFAULT_DDL_IN_TRANSACTION_MODE, + DdlInTransactionModeConverter.INSTANCE, + Context.USER); + static final ConnectionProperty MAX_COMMIT_DELAY = + create( + "maxCommitDelay", + "The max delay that Spanner may apply to commit requests to improve throughput.", + null, + DurationConverter.INSTANCE, + Context.USER); + + static final Map> CONNECTION_PROPERTIES = + CONNECTION_PROPERTIES_BUILDER.build(); + + /** Utility method for creating a new core {@link ConnectionProperty}. */ + private static ConnectionProperty create( + String name, + String description, + T defaultValue, + ClientSideStatementValueConverter converter, + Context context) { + ConnectionProperty property = + ConnectionProperty.create(name, description, defaultValue, converter, context); + CONNECTION_PROPERTIES_BUILDER.put(property.getKey(), property); + return property; + } + + /** Parse the connection properties that can be found in the given connection URL. 
*/ + static ImmutableMap> parseValues(String url) { + ImmutableMap.Builder> builder = ImmutableMap.builder(); + for (ConnectionProperty property : CONNECTION_PROPERTIES.values()) { + ConnectionPropertyValue value = parseValue(castProperty(property), url); + if (value != null) { + builder.put(property.getKey(), value); + } + } + return builder.build(); + } + + /** + * Parse and convert the value of the specific connection property from a connection URL (e.g. + * readonly=true). + */ + private static ConnectionPropertyValue parseValue( + ConnectionProperty property, String url) { + String stringValue = ConnectionOptions.parseUriProperty(url, property.getKey()); + return property.convert(stringValue); + } + + /** This class should not be instantiated. */ + private ConnectionProperties() {} +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionProperty.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionProperty.java new file mode 100644 index 00000000000..c203d44203b --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionProperty.java @@ -0,0 +1,197 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.base.Strings; +import java.util.Locale; +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +/** + * {@link ConnectionProperty} is a variable for a connection. The total set of connection properties + * is the state of a connection, and determine the behavior of that connection. For example, a + * connection with a {@link ConnectionProperty} READONLY=true and AUTOCOMMIT=false will use + * read-only transactions by default, while a connection with READONLY=false and AUTOCOMMIT=false + * will use read/write transactions. + * + *

    Connection properties are stored in a {@link ConnectionState} instance. {@link + * ConnectionState} can be transactional. That is; changes to a connection property during a + * transaction will be undone if the transaction is rolled back. Transactional connection state is + * the default for PostgreSQL-dialect databases. For GoogleSQL-dialect databases, transactional + * connection state is an opt-in. + */ +public class ConnectionProperty { + /** + * Context indicates when a {@link ConnectionProperty} may be set. Each higher-ordinal value + * includes the preceding values, meaning that a {@link ConnectionProperty} with {@link + * Context#USER} can be set both at connection startup and during the connection's lifetime. + */ + enum Context { + /** The property can only be set at startup of the connection. */ + STARTUP, + /** + * The property can be set at startup or by a user during the lifetime of a connection. The + * value is persisted until it is changed again by the user. + */ + USER, + } + + /** Utility method for doing an unchecked cast to a typed {@link ConnectionProperty}. */ + static ConnectionProperty castProperty(ConnectionProperty property) { + //noinspection unchecked + return (ConnectionProperty) property; + } + + /** + * Utility method for creating a key for a {@link ConnectionProperty}. The key of a property is + * always lower-case and consists of '[extension.]name'. + */ + @Nonnull + static String createKey(String extension, @Nonnull String name) { + ConnectionPreconditions.checkArgument( + !Strings.isNullOrEmpty(name), "property name must be a non-empty string"); + return extension == null + ? name.toLowerCase(Locale.ENGLISH) + : extension.toLowerCase(Locale.ENGLISH) + "." + name.toLowerCase(Locale.ENGLISH); + } + + /** Utility method for creating a typed {@link ConnectionProperty}. 
*/ + @Nonnull + static ConnectionProperty create( + @Nonnull String name, + String description, + T defaultValue, + ClientSideStatementValueConverter converter, + Context context) { + return new ConnectionProperty<>( + null, name, description, defaultValue, null, converter, context); + } + + /** + * The 'extension' of this property. This is (currently) only used for PostgreSQL-dialect + * databases. + */ + private final String extension; + + @Nonnull private final String name; + + @Nonnull private final String key; + + @Nonnull private final String description; + + private final T defaultValue; + + private final T[] validValues; + + private final ClientSideStatementValueConverter converter; + + private final Context context; + + ConnectionProperty( + String extension, + @Nonnull String name, + @Nonnull String description, + T defaultValue, + T[] validValues, + ClientSideStatementValueConverter converter, + Context context) { + ConnectionPreconditions.checkArgument( + !Strings.isNullOrEmpty(name), "property name must be a non-empty string"); + ConnectionPreconditions.checkArgument( + !Strings.isNullOrEmpty(description), "property description must be a non-empty string"); + this.extension = extension == null ? 
null : extension.toLowerCase(Locale.ENGLISH); + this.name = name.toLowerCase(Locale.ENGLISH); + this.description = description; + this.defaultValue = defaultValue; + this.validValues = validValues; + this.converter = converter; + this.context = context; + this.key = createKey(this.extension, this.name); + } + + @Override + public String toString() { + return this.key; + } + + @Override + public int hashCode() { + return this.key.hashCode(); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ConnectionProperty)) { + return false; + } + ConnectionProperty other = (ConnectionProperty) o; + return this.key.equals(other.key); + } + + ConnectionPropertyValue createInitialValue(@Nullable ConnectionPropertyValue initialValue) { + return initialValue == null + ? new ConnectionPropertyValue<>(this, this.defaultValue, this.defaultValue) + : initialValue.copy(); + } + + @Nullable + ConnectionPropertyValue convert(@Nullable String stringValue) { + if (stringValue == null) { + return null; + } + T convertedValue = this.converter.convert(stringValue); + if (convertedValue == null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "Invalid value for property " + this + ": " + stringValue); + } + return new ConnectionPropertyValue<>(this, convertedValue, convertedValue); + } + + String getKey() { + return this.key; + } + + boolean hasExtension() { + return this.extension != null; + } + + String getExtension() { + return this.extension; + } + + String getName() { + return this.name; + } + + String getDescription() { + return this.description; + } + + T getDefaultValue() { + return this.defaultValue; + } + + T[] getValidValues() { + return this.validValues; + } + + Context getContext() { + return this.context; + } +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionPropertyValue.java 
b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionPropertyValue.java new file mode 100644 index 00000000000..088a28d9d8a --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionPropertyValue.java @@ -0,0 +1,80 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.connection.ConnectionProperty.Context; +import java.util.Objects; + +class ConnectionPropertyValue { + static ConnectionPropertyValue cast(ConnectionPropertyValue value) { + //noinspection unchecked + return (ConnectionPropertyValue) value; + } + + private final ConnectionProperty property; + private final T resetValue; + + private T value; + + ConnectionPropertyValue(ConnectionProperty property, T resetValue, T value) { + this.property = property; + this.resetValue = resetValue; + this.value = value; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ConnectionPropertyValue)) { + return false; + } + ConnectionPropertyValue other = cast((ConnectionPropertyValue) o); + return Objects.equals(this.property, other.property) + && Objects.equals(this.resetValue, other.resetValue) + && Objects.equals(this.value, other.value); + } + + @Override + public int hashCode() { + return Objects.hash(this.property, this.resetValue, this.value); + } + + ConnectionProperty getProperty() { + return 
property; + } + + T getResetValue() { + return resetValue; + } + + T getValue() { + return value; + } + + void setValue(T value, Context context) { + ConnectionPreconditions.checkState( + property.getContext().ordinal() >= context.ordinal(), + "Property has context " + + property.getContext() + + " and cannot be set in context " + + context); + this.value = value; + } + + ConnectionPropertyValue copy() { + return new ConnectionPropertyValue<>(this.property, this.resetValue, this.value); + } +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionState.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionState.java new file mode 100644 index 00000000000..b732d617c22 --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionState.java @@ -0,0 +1,282 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionProperties.CONNECTION_PROPERTIES; +import static com.google.cloud.spanner.connection.ConnectionProperties.CONNECTION_STATE_TYPE; +import static com.google.cloud.spanner.connection.ConnectionProperty.castProperty; +import static com.google.cloud.spanner.connection.ConnectionPropertyValue.cast; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.ConnectionProperty.Context; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Suppliers; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.function.Supplier; +import javax.annotation.Nullable; + +class ConnectionState { + /** The type of connection state that is used. */ + enum Type { + /** + * Transactional connection state will roll back changes to connection properties that have been + * done during a transaction if the transaction is rolled back. + */ + TRANSACTIONAL, + /** + * Non-transactional connection state directly applies connection property changes during + * transactions to the main set of properties. Note that non-transactional connection state does + * support local properties. These are property changes that are only visible during the current + * transaction, and that are lost after committing or rolling back the current transaction. + */ + NON_TRANSACTIONAL, + } + + private final Object lock = new Object(); + + private final Supplier type; + + /** properties contain the current connection properties of a connection. */ + private final Map> properties; + + /** + * transactionProperties are the modified connection properties during a transaction. 
This is only + * used for {@link ConnectionState} that is marked as {@link Type#TRANSACTIONAL}. + */ + private Map> transactionProperties; + /** localProperties are the modified local properties during a transaction. */ + private Map> localProperties; + + /** Constructs a non-transactional {@link ConnectionState} with the given initial values. */ + ConnectionState(Map> initialValues) { + this(initialValues, Suppliers.ofInstance(Type.NON_TRANSACTIONAL)); + } + + /** + * Constructs a {@link ConnectionState} with the given initial values. The type will be + * transactional or non-transactional based on the value that is returned by the given supplier. + * The type is determined lazily to allow connections to determine the default based on the + * dialect, and the dialect is not known directly when a connection is created. + */ + ConnectionState( + Map> initialValues, + Supplier defaultConnectionStateTypeSupplier) { + this.properties = new HashMap<>(CONNECTION_PROPERTIES.size()); + for (Entry> entry : CONNECTION_PROPERTIES.entrySet()) { + this.properties.put( + entry.getKey(), + entry.getValue().createInitialValue(cast(initialValues.get(entry.getKey())))); + } + // Add any additional non-core values from the options. 
+ for (Entry> entry : initialValues.entrySet()) { + if (!this.properties.containsKey(entry.getKey())) { + setValue( + castProperty(entry.getValue().getProperty()), + cast(entry.getValue()).getValue(), + Context.STARTUP, + /* inTransaction = */ false); + } + } + Type configuredType = getValue(CONNECTION_STATE_TYPE).getValue(); + if (configuredType == null) { + this.type = defaultConnectionStateTypeSupplier; + } else { + this.type = Suppliers.ofInstance(configuredType); + } + } + + @VisibleForTesting + Type getType() { + return this.type.get(); + } + + boolean hasTransactionalChanges() { + synchronized (lock) { + return this.transactionProperties != null || this.localProperties != null; + } + } + + /** + * Returns an unmodifiable map with all the property values of this {@link ConnectionState}. The + * map cannot be modified, but any changes to the current (committed) state will be reflected in + * the map that is returned by this method. + */ + Map> getAllValues() { + synchronized (lock) { + return Collections.unmodifiableMap(this.properties); + } + } + + /** Returns the current value of the specified setting. */ + ConnectionPropertyValue getValue(ConnectionProperty property) { + synchronized (lock) { + return internalGetValue(property, true); + } + } + + /** Returns the current value of the specified setting or null if undefined. 
*/ + @Nullable + ConnectionPropertyValue tryGetValue(ConnectionProperty property) { + synchronized (lock) { + return internalGetValue(property, false); + } + } + + private ConnectionPropertyValue internalGetValue( + ConnectionProperty property, boolean throwForUnknownParam) { + if (localProperties != null && localProperties.containsKey(property.getKey())) { + return cast(localProperties.get(property.getKey())); + } + if (transactionProperties != null && transactionProperties.containsKey(property.getKey())) { + return cast(transactionProperties.get(property.getKey())); + } + if (properties.containsKey(property.getKey())) { + return cast(properties.get(property.getKey())); + } + if (throwForUnknownParam) { + throw unknownParamError(property); + } + return null; + } + + /** + * Sets the value of the specified property. The new value will be persisted if the current + * transaction is committed or directly if the connection state is non-transactional. The value + * will be lost if the transaction is rolled back and the connection state is transactional. + */ + void setValue( + ConnectionProperty property, T value, Context context, boolean inTransaction) { + ConnectionPreconditions.checkState( + property.getContext().ordinal() >= context.ordinal(), + "Property has context " + + property.getContext() + + " and cannot be set in context " + + context); + synchronized (lock) { + if (!inTransaction + || getType() == Type.NON_TRANSACTIONAL + || context.ordinal() < Context.USER.ordinal()) { + internalSetValue(property, value, properties, context); + return; + } + + if (transactionProperties == null) { + transactionProperties = new HashMap<>(); + } + internalSetValue(property, value, transactionProperties, context); + // Remove the setting from the local settings if it's there, as the new transaction setting is + // the one that should be used. 
+ if (localProperties != null) { + localProperties.remove(property.getKey()); + } + } + } + + /** + * Sets the value of the specified setting for the current transaction. This value is lost when + * the transaction is committed or rolled back. This can be used to temporarily set a value only + * during a transaction, for example if a user wants to disable internal transaction retries only + * for a single transaction. + */ + void setLocalValue(ConnectionProperty property, T value) { + ConnectionPreconditions.checkState( + property.getContext().ordinal() >= Context.USER.ordinal(), + "setLocalValue is only supported for properties with context USER or higher."); + synchronized (lock) { + if (localProperties == null) { + localProperties = new HashMap<>(); + } + // Note that setting a local setting does not remove it from the transaction settings. This + // means that a commit will persist the setting in transactionSettings. + internalSetValue(property, value, localProperties, Context.USER); + } + } + + /** + * Resets the value of the specified property. The new value will be persisted if the current + * transaction is committed or directly if the connection state is non-transactional. The value + * will be lost if the transaction is rolled back and the connection state is transactional. + */ + void resetValue(ConnectionProperty property, Context context, boolean inTransaction) { + synchronized (lock) { + ConnectionPropertyValue currentValue = getValue(property); + if (currentValue == null) { + setValue(property, null, context, inTransaction); + } else { + setValue(property, currentValue.getResetValue(), context, inTransaction); + } + } + } + + /** Persists the new value for a property to the given map of properties. 
*/ + private void internalSetValue( + ConnectionProperty property, + T value, + Map> currentProperties, + Context context) { + ConnectionPropertyValue newValue = cast(currentProperties.get(property.getKey())); + if (newValue == null) { + ConnectionPropertyValue existingValue = cast(properties.get(property.getKey())); + if (existingValue == null) { + if (!property.hasExtension()) { + throw unknownParamError(property); + } + newValue = new ConnectionPropertyValue(property, null, null); + } else { + newValue = existingValue.copy(); + } + } + newValue.setValue(value, context); + currentProperties.put(property.getKey(), newValue); + } + + /** Creates an exception for an unknown connection property. */ + static SpannerException unknownParamError(ConnectionProperty property) { + return SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, + String.format("unrecognized configuration property \"%s\"", property)); + } + + /** + * Commits the current transaction and persists any changes to the settings (except local + * changes). + */ + void commit() { + synchronized (lock) { + if (transactionProperties != null) { + for (ConnectionPropertyValue value : transactionProperties.values()) { + properties.put(value.getProperty().getKey(), value); + } + } + this.localProperties = null; + this.transactionProperties = null; + } + } + + /** Rolls back the current transaction and abandons any pending changes to the settings. 
*/ + void rollback() { + synchronized (lock) { + this.localProperties = null; + this.transactionProperties = null; + } + } +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionStatementExecutor.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionStatementExecutor.java index cc4c53275b3..84e93e1a034 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionStatementExecutor.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionStatementExecutor.java @@ -16,12 +16,12 @@ package com.google.cloud.spanner.connection; +import com.google.cloud.spanner.Options.RpcPriority; import com.google.cloud.spanner.Statement; import com.google.cloud.spanner.TimestampBound; import com.google.cloud.spanner.connection.PgTransactionMode.IsolationLevel; -import com.google.protobuf.Duration; import com.google.spanner.v1.DirectedReadOptions; -import com.google.spanner.v1.RequestOptions.Priority; +import java.time.Duration; /** * The Cloud Spanner JDBC driver supports a number of client side statements that are interpreted by @@ -91,6 +91,10 @@ StatementResult statementSetDelayTransactionStartUntilFirstWrite( StatementResult statementShowDelayTransactionStartUntilFirstWrite(); + StatementResult statementSetKeepTransactionAlive(Boolean keepTransactionAlive); + + StatementResult statementShowKeepTransactionAlive(); + StatementResult statementSetStatementTag(String tag); StatementResult statementShowStatementTag(); @@ -99,6 +103,10 @@ StatementResult statementSetDelayTransactionStartUntilFirstWrite( StatementResult statementShowTransactionTag(); + StatementResult statementSetExcludeTxnFromChangeStreams(Boolean excludeTxnFromChangeStreams); + + StatementResult statementShowExcludeTxnFromChangeStreams(); + StatementResult statementBeginTransaction(); StatementResult statementBeginPgTransaction(PgTransactionMode transactionMode); @@ -124,7 +132,9 
@@ StatementResult statementSetPgSessionCharacteristicsTransactionMode( StatementResult statementAbortBatch(); - StatementResult statementSetRPCPriority(Priority priority); + StatementResult statementResetAll(); + + StatementResult statementSetRPCPriority(RpcPriority priority); StatementResult statementShowRPCPriority(); @@ -134,6 +144,14 @@ StatementResult statementSetPgSessionCharacteristicsTransactionMode( StatementResult statementShowTransactionIsolationLevel(); + StatementResult statementSetProtoDescriptors(byte[] protoDescriptors); + + StatementResult statementSetProtoDescriptorsFilePath(String filePath); + + StatementResult statementShowProtoDescriptors(); + + StatementResult statementShowProtoDescriptorsFilePath(); + StatementResult statementExplain(String sql); StatementResult statementShowDataBoostEnabled(); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionStatementExecutorImpl.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionStatementExecutorImpl.java index ec0ca4f4ac3..3e1040ba011 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionStatementExecutorImpl.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ConnectionStatementExecutorImpl.java @@ -20,6 +20,7 @@ import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.ABORT_BATCH; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.BEGIN; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.COMMIT; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.RESET_ALL; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.ROLLBACK; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.RUN_BATCH; import static 
com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_AUTOCOMMIT; @@ -29,11 +30,15 @@ import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_DEFAULT_TRANSACTION_ISOLATION; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_DIRECTED_READ; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_EXCLUDE_TXN_FROM_CHANGE_STREAMS; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_KEEP_TRANSACTION_ALIVE; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_MAX_COMMIT_DELAY; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_MAX_PARTITIONED_PARALLELISM; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_MAX_PARTITIONS; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_OPTIMIZER_STATISTICS_PACKAGE; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_OPTIMIZER_VERSION; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_PROTO_DESCRIPTORS; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_PROTO_DESCRIPTORS_FILE_PATH; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_READONLY; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_READ_ONLY_STALENESS; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SET_RETRY_ABORTS_INTERNALLY; @@ -52,11 +57,15 @@ import static 
com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_DATA_BOOST_ENABLED; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_DIRECTED_READ; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_EXCLUDE_TXN_FROM_CHANGE_STREAMS; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_KEEP_TRANSACTION_ALIVE; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_MAX_COMMIT_DELAY; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_MAX_PARTITIONED_PARALLELISM; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_MAX_PARTITIONS; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_OPTIMIZER_STATISTICS_PACKAGE; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_OPTIMIZER_VERSION; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_PROTO_DESCRIPTORS; +import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_PROTO_DESCRIPTORS_FILE_PATH; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_READONLY; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_READ_ONLY_STALENESS; import static com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType.SHOW_READ_TIMESTAMP; @@ -93,16 +102,13 @@ import com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType; import com.google.common.base.MoreObjects; import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; 
-import com.google.protobuf.Duration; import com.google.spanner.v1.DirectedReadOptions; import com.google.spanner.v1.PlanNode; import com.google.spanner.v1.QueryPlan; import com.google.spanner.v1.RequestOptions; -import com.google.spanner.v1.RequestOptions.Priority; +import java.time.Duration; import java.util.ArrayList; import java.util.Collections; -import java.util.Map; import java.util.concurrent.TimeUnit; import javax.annotation.Nullable; @@ -130,16 +136,6 @@ public boolean hasDuration() { } } - private static final Map validRPCPriorityValues; - - static { - ImmutableMap.Builder builder = ImmutableMap.builder(); - builder.put(Priority.PRIORITY_HIGH, RpcPriority.HIGH); - builder.put(Priority.PRIORITY_MEDIUM, RpcPriority.MEDIUM); - builder.put(Priority.PRIORITY_LOW, RpcPriority.LOW); - validRPCPriorityValues = builder.build(); - } - /** The connection to execute the statements on. */ private final ConnectionImpl connection; @@ -209,14 +205,19 @@ public StatementResult statementShowAutocommitDmlMode() { @Override public StatementResult statementSetStatementTimeout(Duration duration) { - if (duration.getSeconds() == 0L && duration.getNanos() == 0) { + if (duration == null || duration.isZero()) { getConnection().clearStatementTimeout(); } else { + com.google.protobuf.Duration protoDuration = + com.google.protobuf.Duration.newBuilder() + .setSeconds(duration.getSeconds()) + .setNanos(duration.getNano()) + .build(); TimeUnit unit = ReadOnlyStalenessUtil.getAppropriateTimeUnit( - new ReadOnlyStalenessUtil.DurationGetter(duration)); + new ReadOnlyStalenessUtil.DurationGetter(protoDuration)); getConnection() - .setStatementTimeout(ReadOnlyStalenessUtil.durationToUnits(duration, unit), unit); + .setStatementTimeout(ReadOnlyStalenessUtil.durationToUnits(protoDuration, unit), unit); } return noResult(SET_STATEMENT_TIMEOUT); } @@ -347,11 +348,7 @@ public StatementResult statementShowReturnCommitStats() { @Override public StatementResult statementSetMaxCommitDelay(Duration 
duration) { - getConnection() - .setMaxCommitDelay( - duration == null || duration.equals(Duration.getDefaultInstance()) - ? null - : java.time.Duration.ofSeconds(duration.getSeconds(), duration.getNanos())); + getConnection().setMaxCommitDelay(duration == null || duration.isZero() ? null : duration); return noResult(SET_MAX_COMMIT_DELAY); } @@ -381,6 +378,20 @@ public StatementResult statementShowDelayTransactionStartUntilFirstWrite() { SHOW_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE); } + @Override + public StatementResult statementSetKeepTransactionAlive(Boolean keepTransactionAlive) { + getConnection().setKeepTransactionAlive(keepTransactionAlive); + return noResult(SET_KEEP_TRANSACTION_ALIVE); + } + + @Override + public StatementResult statementShowKeepTransactionAlive() { + return resultSet( + String.format("%sKEEP_TRANSACTION_ALIVE", getNamespace(connection.getDialect())), + getConnection().isKeepTransactionAlive(), + SHOW_KEEP_TRANSACTION_ALIVE); + } + @Override public StatementResult statementSetStatementTag(String tag) { getConnection().setStatementTag("".equals(tag) ? 
null : tag); @@ -409,6 +420,21 @@ public StatementResult statementShowTransactionTag() { SHOW_TRANSACTION_TAG); } + @Override + public StatementResult statementSetExcludeTxnFromChangeStreams( + Boolean excludeTxnFromChangeStreams) { + getConnection().setExcludeTxnFromChangeStreams(excludeTxnFromChangeStreams); + return noResult(SET_EXCLUDE_TXN_FROM_CHANGE_STREAMS); + } + + @Override + public StatementResult statementShowExcludeTxnFromChangeStreams() { + return resultSet( + String.format("%sEXCLUDE_TXN_FROM_CHANGE_STREAMS", getNamespace(connection.getDialect())), + getConnection().isExcludeTxnFromChangeStreams(), + SHOW_EXCLUDE_TXN_FROM_CHANGE_STREAMS); + } + @Override public StatementResult statementBeginTransaction() { getConnection().beginTransaction(); @@ -508,9 +534,14 @@ public StatementResult statementAbortBatch() { } @Override - public StatementResult statementSetRPCPriority(Priority priority) { - RpcPriority value = validRPCPriorityValues.get(priority); - getConnection().setRPCPriority(value); + public StatementResult statementResetAll() { + getConnection().reset(); + return noResult(RESET_ALL); + } + + @Override + public StatementResult statementSetRPCPriority(RpcPriority priority) { + getConnection().setRPCPriority(priority); return noResult(SET_RPC_PRIORITY); } @@ -620,6 +651,36 @@ public StatementResult statementRunPartitionedQuery(Statement statement) { ClientSideStatementType.RUN_PARTITIONED_QUERY); } + @Override + public StatementResult statementSetProtoDescriptors(byte[] protoDescriptors) { + Preconditions.checkNotNull(protoDescriptors); + getConnection().setProtoDescriptors(protoDescriptors); + return noResult(SET_PROTO_DESCRIPTORS); + } + + @Override + public StatementResult statementSetProtoDescriptorsFilePath(String filePath) { + Preconditions.checkNotNull(filePath); + getConnection().setProtoDescriptorsFilePath(filePath); + return noResult(SET_PROTO_DESCRIPTORS_FILE_PATH); + } + + @Override + public StatementResult 
statementShowProtoDescriptors() { + return resultSet( + String.format("%sPROTO_DESCRIPTORS", getNamespace(connection.getDialect())), + getConnection().getProtoDescriptors(), + SHOW_PROTO_DESCRIPTORS); + } + + @Override + public StatementResult statementShowProtoDescriptorsFilePath() { + return resultSet( + String.format("%sPROTO_DESCRIPTORS_FILE_PATH", getNamespace(connection.getDialect())), + getConnection().getProtoDescriptorsFilePath(), + SHOW_PROTO_DESCRIPTORS_FILE_PATH); + } + private String processQueryPlan(PlanNode planNode) { StringBuilder planNodeDescription = new StringBuilder(" : { "); com.google.protobuf.Struct metadata = planNode.getMetadata(); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlBatch.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlBatch.java index 55b780c5718..6ae28822473 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlBatch.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlBatch.java @@ -35,12 +35,17 @@ import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.MoreExecutors; import com.google.spanner.admin.database.v1.DatabaseAdminGrpc; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.StatusCode; +import io.opentelemetry.context.Scope; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.concurrent.Callable; +import javax.annotation.Nonnull; /** * {@link UnitOfWork} that is used when a DDL batch is started. 
These batches only accept DDL @@ -55,10 +60,12 @@ class DdlBatch extends AbstractBaseUnitOfWork { private final DatabaseClient dbClient; private final List statements = new ArrayList<>(); private UnitOfWorkState state = UnitOfWorkState.STARTED; + private final byte[] protoDescriptors; static class Builder extends AbstractBaseUnitOfWork.Builder { private DdlClient ddlClient; private DatabaseClient dbClient; + private byte[] protoDescriptors; private Builder() {} @@ -74,6 +81,11 @@ Builder setDatabaseClient(DatabaseClient client) { return this; } + Builder setProtoDescriptors(byte[] protoDescriptors) { + this.protoDescriptors = protoDescriptors; + return this; + } + @Override DdlBatch build() { Preconditions.checkState(ddlClient != null, "No DdlClient specified"); @@ -90,6 +102,12 @@ private DdlBatch(Builder builder) { super(builder); this.ddlClient = builder.ddlClient; this.dbClient = builder.dbClient; + this.protoDescriptors = builder.protoDescriptors; + } + + @Override + public boolean isSingleUse() { + return false; } @Override @@ -204,36 +222,51 @@ public ApiFuture writeAsync(CallType callType, Iterable mutation public ApiFuture runBatchAsync(CallType callType) { ConnectionPreconditions.checkState( state == UnitOfWorkState.STARTED, "The batch is no longer active and cannot be ran"); - if (statements.isEmpty()) { - this.state = UnitOfWorkState.RAN; - return ApiFutures.immediateFuture(new long[0]); - } - // create a statement that can be passed in to the execute method - Callable callable = - () -> { - try { - OperationFuture operation = - ddlClient.executeDdl(statements); + try (Scope ignore = span.makeCurrent()) { + if (statements.isEmpty()) { + this.state = UnitOfWorkState.RAN; + return ApiFutures.transform( + asyncEndUnitOfWorkSpan(), unused -> new long[0], MoreExecutors.directExecutor()); + } + // Set the DDL statements on the span. 
+ + span.setAllAttributes(Attributes.of(DB_STATEMENT_ARRAY_KEY, statements)); + // create a statement that can be passed in to the execute method + Callable callable = + () -> { try { - // Wait until the operation has finished. - getWithStatementTimeout(operation, RUN_BATCH_STATEMENT); - long[] updateCounts = new long[statements.size()]; - Arrays.fill(updateCounts, 1L); - state = UnitOfWorkState.RAN; - return updateCounts; - } catch (SpannerException e) { - long[] updateCounts = extractUpdateCounts(operation); - throw SpannerExceptionFactory.newSpannerBatchUpdateException( - e.getErrorCode(), e.getMessage(), updateCounts); + OperationFuture operation = + ddlClient.executeDdl(statements, protoDescriptors); + try { + // Wait until the operation has finished. + getWithStatementTimeout(operation, RUN_BATCH_STATEMENT); + long[] updateCounts = new long[statements.size()]; + Arrays.fill(updateCounts, 1L); + state = UnitOfWorkState.RAN; + return updateCounts; + } catch (SpannerException e) { + long[] updateCounts = extractUpdateCounts(operation); + throw SpannerExceptionFactory.newSpannerBatchUpdateException( + e.getErrorCode(), e.getMessage(), updateCounts); + } + } catch (Throwable t) { + span.setStatus(StatusCode.ERROR); + span.recordException(t); + state = UnitOfWorkState.RUN_FAILED; + throw t; } - } catch (Throwable t) { - state = UnitOfWorkState.RUN_FAILED; - throw t; - } - }; - this.state = UnitOfWorkState.RUNNING; - return executeStatementAsync( - callType, RUN_BATCH_STATEMENT, callable, DatabaseAdminGrpc.getUpdateDatabaseDdlMethod()); + }; + this.state = UnitOfWorkState.RUNNING; + ApiFuture result = + executeStatementAsync( + callType, + RUN_BATCH_STATEMENT, + callable, + DatabaseAdminGrpc.getUpdateDatabaseDdlMethod()); + asyncEndUnitOfWorkSpan(); + + return result; + } } long[] extractUpdateCounts(OperationFuture operation) { @@ -261,17 +294,20 @@ long[] extractUpdateCounts(UpdateDatabaseDdlMetadata metadata) { public void abortBatch() { 
ConnectionPreconditions.checkState( state == UnitOfWorkState.STARTED, "The batch is no longer active and cannot be aborted."); + asyncEndUnitOfWorkSpan(); this.state = UnitOfWorkState.ABORTED; } @Override - public ApiFuture commitAsync(CallType callType) { + public ApiFuture commitAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { throw SpannerExceptionFactory.newSpannerException( ErrorCode.FAILED_PRECONDITION, "Commit is not allowed for DDL batches."); } @Override - public ApiFuture rollbackAsync(CallType callType) { + public ApiFuture rollbackAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { throw SpannerExceptionFactory.newSpannerException( ErrorCode.FAILED_PRECONDITION, "Rollback is not allowed for DDL batches."); } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlClient.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlClient.java index fedf60d7a91..7bce1ab78cd 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlClient.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DdlClient.java @@ -19,6 +19,7 @@ import com.google.api.gax.longrunning.OperationFuture; import com.google.cloud.spanner.Database; import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseId; import com.google.cloud.spanner.Dialect; import com.google.cloud.spanner.ErrorCode; import com.google.cloud.spanner.SpannerExceptionFactory; @@ -35,11 +36,13 @@ */ class DdlClient { private final DatabaseAdminClient dbAdminClient; + private final String projectId; private final String instanceId; private final String databaseName; static class Builder { private DatabaseAdminClient dbAdminClient; + private String projectId; private String instanceId; private String databaseName; @@ -51,6 +54,13 @@ Builder setDatabaseAdminClient(DatabaseAdminClient client) { return this; } + 
Builder setProjectId(String projectId) { + Preconditions.checkArgument( + !Strings.isNullOrEmpty(projectId), "Empty projectId is not allowed"); + this.projectId = projectId; + return this; + } + Builder setInstanceId(String instanceId) { Preconditions.checkArgument( !Strings.isNullOrEmpty(instanceId), "Empty instanceId is not allowed"); @@ -67,6 +77,7 @@ Builder setDatabaseName(String name) { DdlClient build() { Preconditions.checkState(dbAdminClient != null, "No DatabaseAdminClient specified"); + Preconditions.checkState(!Strings.isNullOrEmpty(projectId), "No ProjectId specified"); Preconditions.checkState(!Strings.isNullOrEmpty(instanceId), "No InstanceId specified"); Preconditions.checkArgument( !Strings.isNullOrEmpty(databaseName), "No database name specified"); @@ -80,6 +91,7 @@ static Builder newBuilder() { private DdlClient(Builder builder) { this.dbAdminClient = builder.dbAdminClient; + this.projectId = builder.projectId; this.instanceId = builder.instanceId; this.databaseName = builder.databaseName; } @@ -92,17 +104,24 @@ OperationFuture executeCreateDatabase( } /** Execute a single DDL statement. */ - OperationFuture executeDdl(String ddl) { - return executeDdl(Collections.singletonList(ddl)); + OperationFuture executeDdl(String ddl, byte[] protoDescriptors) { + return executeDdl(Collections.singletonList(ddl), protoDescriptors); } /** Execute a list of DDL statements as one operation. 
*/ - OperationFuture executeDdl(List statements) { + OperationFuture executeDdl( + List statements, byte[] protoDescriptors) { if (statements.stream().anyMatch(DdlClient::isCreateDatabaseStatement)) { throw SpannerExceptionFactory.newSpannerException( ErrorCode.INVALID_ARGUMENT, "CREATE DATABASE is not supported in a DDL batch"); } - return dbAdminClient.updateDatabaseDdl(instanceId, databaseName, statements, null); + Database.Builder dbBuilder = + dbAdminClient.newDatabaseBuilder(DatabaseId.of(projectId, instanceId, databaseName)); + if (protoDescriptors != null) { + dbBuilder.setProtoDescriptors(protoDescriptors); + } + Database db = dbBuilder.build(); + return dbAdminClient.updateDatabaseDdl(db, statements, null); } /** Returns true if the statement is a `CREATE DATABASE ...` statement. */ diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DirectedReadOptionsUtil.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DirectedReadOptionsUtil.java index 8b1f8a90199..8b346a08f3d 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DirectedReadOptionsUtil.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DirectedReadOptionsUtil.java @@ -23,6 +23,25 @@ import com.google.spanner.v1.DirectedReadOptions; public class DirectedReadOptionsUtil { + static class DirectedReadOptionsConverter + implements ClientSideStatementValueConverter { + static DirectedReadOptionsConverter INSTANCE = new DirectedReadOptionsConverter(); + + @Override + public Class getParameterClass() { + return DirectedReadOptions.class; + } + + @Override + public DirectedReadOptions convert(String value) { + try { + return parse(value); + } catch (Throwable ignore) { + // ClientSideStatementValueConverters should return null if the value cannot be converted. 
+ return null; + } + } + } /** * Generates a valid JSON string for the given {@link DirectedReadOptions} that can be used with diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DmlBatch.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DmlBatch.java index d537a8ee997..ea7732a7c0a 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DmlBatch.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/DmlBatch.java @@ -33,8 +33,10 @@ import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.MoreExecutors; +import io.opentelemetry.context.Scope; import java.util.ArrayList; import java.util.List; +import javax.annotation.Nonnull; /** * {@link UnitOfWork} that is used when a DML batch is started. These batches only accept DML @@ -77,10 +79,15 @@ static Builder newBuilder() { private DmlBatch(Builder builder) { super(builder); - this.transaction = builder.transaction; + this.transaction = Preconditions.checkNotNull(builder.transaction); this.statementTag = builder.statementTag; } + @Override + public boolean isSingleUse() { + return false; + } + @Override public Type getType() { return Type.BATCH; @@ -168,8 +175,11 @@ public ApiFuture executeUpdateAsync( @Override public ApiFuture analyzeUpdateAsync( CallType callType, ParsedStatement update, AnalyzeMode analyzeMode, UpdateOption... 
options) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.FAILED_PRECONDITION, "Analyzing updates is not allowed for DML batches."); + if (transaction.isSingleUse()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Analyzing updates is not allowed for DML batches."); + } + return transaction.analyzeUpdateAsync(callType, update, analyzeMode, options); } @Override @@ -189,68 +199,74 @@ public ApiFuture writeAsync(CallType callType, Iterable mutation public ApiFuture runBatchAsync(CallType callType) { ConnectionPreconditions.checkState( state == UnitOfWorkState.STARTED, "The batch is no longer active and cannot be ran"); - if (statements.isEmpty()) { - this.state = UnitOfWorkState.RAN; - return ApiFutures.immediateFuture(new long[0]); - } - this.state = UnitOfWorkState.RUNNING; - // Use a SettableApiFuture to return the result, instead of directly returning the future that - // is returned by the executeBatchUpdateAsync method. This is needed because the state of the - // batch is set after the update has finished, and this happens in a listener. A listener is - // executed AFTER a Future is done, which means that a user could read the state of the Batch - // before it has been changed. 
- final SettableApiFuture res = SettableApiFuture.create(); - int numOptions = 0; - if (statementTag != null) { - numOptions++; - } - if (this.rpcPriority != null) { - numOptions++; - } - UpdateOption[] options = new UpdateOption[numOptions]; - int index = 0; - if (statementTag != null) { - options[index++] = Options.tag(statementTag); - } - if (this.rpcPriority != null) { - options[index++] = Options.priority(this.rpcPriority); + try (Scope ignore = span.makeCurrent()) { + if (statements.isEmpty()) { + this.state = UnitOfWorkState.RAN; + return ApiFutures.immediateFuture(new long[0]); + } + this.state = UnitOfWorkState.RUNNING; + // Use a SettableApiFuture to return the result, instead of directly returning the future that + // is returned by the executeBatchUpdateAsync method. This is needed because the state of the + // batch is set after the update has finished, and this happens in a listener. A listener is + // executed AFTER a Future is done, which means that a user could read the state of the Batch + // before it has been changed. 
+ final SettableApiFuture res = SettableApiFuture.create(); + int numOptions = 0; + if (statementTag != null) { + numOptions++; + } + if (this.rpcPriority != null) { + numOptions++; + } + UpdateOption[] options = new UpdateOption[numOptions]; + int index = 0; + if (statementTag != null) { + options[index++] = Options.tag(statementTag); + } + if (this.rpcPriority != null) { + options[index++] = Options.priority(this.rpcPriority); + } + ApiFuture updateCounts = + transaction.executeBatchUpdateAsync(callType, statements, options); + ApiFutures.addCallback( + updateCounts, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable t) { + state = UnitOfWorkState.RUN_FAILED; + res.setException(t); + } + + @Override + public void onSuccess(long[] result) { + state = UnitOfWorkState.RAN; + res.set(result); + } + }, + MoreExecutors.directExecutor()); + asyncEndUnitOfWorkSpan(); + return res; } - ApiFuture updateCounts = - transaction.executeBatchUpdateAsync(callType, statements, options); - ApiFutures.addCallback( - updateCounts, - new ApiFutureCallback() { - @Override - public void onFailure(Throwable t) { - state = UnitOfWorkState.RUN_FAILED; - res.setException(t); - } - - @Override - public void onSuccess(long[] result) { - state = UnitOfWorkState.RAN; - res.set(result); - } - }, - MoreExecutors.directExecutor()); - return res; } @Override public void abortBatch() { ConnectionPreconditions.checkState( state == UnitOfWorkState.STARTED, "The batch is no longer active and cannot be aborted."); + asyncEndUnitOfWorkSpan(); this.state = UnitOfWorkState.ABORTED; } @Override - public ApiFuture commitAsync(CallType callType) { + public ApiFuture commitAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { throw SpannerExceptionFactory.newSpannerException( ErrorCode.FAILED_PRECONDITION, "Commit is not allowed for DML batches."); } @Override - public ApiFuture rollbackAsync(CallType callType) { + public ApiFuture rollbackAsync( + @Nonnull 
CallType callType, @Nonnull EndTransactionCallback callback) { throw SpannerExceptionFactory.newSpannerException( ErrorCode.FAILED_PRECONDITION, "Rollback is not allowed for DML batches."); } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/MergedResultSet.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/MergedResultSet.java index 36451bc8f40..fcbc49f346d 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/MergedResultSet.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/MergedResultSet.java @@ -18,6 +18,7 @@ import static com.google.common.base.Preconditions.checkState; +import com.google.cloud.spanner.ErrorCode; import com.google.cloud.spanner.ForwardingStructReader; import com.google.cloud.spanner.ResultSet; import com.google.cloud.spanner.SpannerException; @@ -30,6 +31,7 @@ import com.google.spanner.v1.ResultSetStats; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingDeque; @@ -47,15 +49,18 @@ static class PartitionExecutor implements Runnable { private final Connection connection; private final String partitionId; private final LinkedBlockingDeque queue; + private final CountDownLatch metadataAvailableLatch; private final AtomicBoolean shouldStop = new AtomicBoolean(); PartitionExecutor( Connection connection, String partitionId, - LinkedBlockingDeque queue) { + LinkedBlockingDeque queue, + CountDownLatch metadataAvailableLatch) { this.connection = Preconditions.checkNotNull(connection); this.partitionId = Preconditions.checkNotNull(partitionId); this.queue = queue; + this.metadataAvailableLatch = Preconditions.checkNotNull(metadataAvailableLatch); } @Override @@ -68,6 +73,7 @@ public void run() { queue.put( PartitionExecutorResult.dataAndMetadata( row, 
resultSet.getType(), resultSet.getMetadata())); + metadataAvailableLatch.countDown(); first = false; } else { queue.put(PartitionExecutorResult.data(row)); @@ -82,9 +88,11 @@ public void run() { queue.put( PartitionExecutorResult.typeAndMetadata( resultSet.getType(), resultSet.getMetadata())); + metadataAvailableLatch.countDown(); } } catch (Throwable exception) { putWithoutInterruptPropagation(PartitionExecutorResult.exception(exception)); + metadataAvailableLatch.countDown(); } finally { // Emit a special 'finished' result to ensure that the row producer is not blocked on a // queue that never receives any more results. This ensures that we can safely block on @@ -215,6 +223,7 @@ private static class RowProducerImpl implements RowProducer { private final AtomicInteger finishedCounter; private final LinkedBlockingDeque queue; private ResultSetMetadata metadata; + private final CountDownLatch metadataAvailableLatch = new CountDownLatch(1); private Type type; private Struct currentRow; private Throwable exception; @@ -243,7 +252,7 @@ private static class RowProducerImpl implements RowProducer { this.finishedCounter = new AtomicInteger(partitions.size()); for (String partition : partitions) { PartitionExecutor partitionExecutor = - new PartitionExecutor(connection, partition, this.queue); + new PartitionExecutor(connection, partition, this.queue, this.metadataAvailableLatch); this.partitionExecutors.add(partitionExecutor); this.executor.submit(partitionExecutor); } @@ -310,8 +319,27 @@ public Struct get() { return currentRow; } + private PartitionExecutorResult getFirstResult() { + try { + metadataAvailableLatch.await(); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } + PartitionExecutorResult result = queue.peek(); + if (result == null) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Thread-unsafe access to ResultSet"); + } + if 
(result.exception != null) { + throw SpannerExceptionFactory.asSpannerException(result.exception); + } + return result; + } + public ResultSetMetadata getMetadata() { - checkState(metadata != null, "next() call required"); + if (metadata == null) { + return getFirstResult().metadata; + } return metadata; } @@ -326,7 +354,9 @@ public int getParallelism() { } public Type getType() { - checkState(type != null, "next() call required"); + if (type == null) { + return getFirstResult().type; + } return type; } } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadOnlyStalenessUtil.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadOnlyStalenessUtil.java index dd2b8612ec1..10c8178efb3 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadOnlyStalenessUtil.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadOnlyStalenessUtil.java @@ -27,6 +27,7 @@ import com.google.cloud.spanner.TimestampBound.Mode; import com.google.protobuf.Duration; import com.google.protobuf.util.Durations; +import java.time.temporal.ChronoUnit; import java.util.concurrent.TimeUnit; /** @@ -93,6 +94,31 @@ static TimeUnit parseTimeUnit(String unit) { ErrorCode.INVALID_ARGUMENT, "Invalid option for time unit: " + unit); } + /** + * Convert from {@link TimeUnit} to {@link ChronoUnit}. This code is copied from {@link + * TimeUnit#toChronoUnit()}, which is available in Java 9 and higher. 
+ */ + static ChronoUnit toChronoUnit(TimeUnit timeUnit) { + switch (timeUnit) { + case NANOSECONDS: + return ChronoUnit.NANOS; + case MICROSECONDS: + return ChronoUnit.MICROS; + case MILLISECONDS: + return ChronoUnit.MILLIS; + case SECONDS: + return ChronoUnit.SECONDS; + case MINUTES: + return ChronoUnit.MINUTES; + case HOURS: + return ChronoUnit.HOURS; + case DAYS: + return ChronoUnit.DAYS; + default: + throw new IllegalArgumentException(); + } + } + /** * Internal interface that is used to generalize getting a time duration from Cloud Spanner * read-only staleness settings. diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadOnlyTransaction.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadOnlyTransaction.java index 63e5221362e..357503cb17f 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadOnlyTransaction.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadOnlyTransaction.java @@ -17,7 +17,6 @@ package com.google.cloud.spanner.connection; import com.google.api.core.ApiFuture; -import com.google.api.core.ApiFutures; import com.google.cloud.Timestamp; import com.google.cloud.spanner.BatchClient; import com.google.cloud.spanner.BatchReadOnlyTransaction; @@ -38,7 +37,9 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.spanner.v1.SpannerGrpc; +import io.opentelemetry.context.Scope; import java.util.concurrent.Callable; +import javax.annotation.Nonnull; /** * Transaction that is used when a {@link Connection} is in read-only mode or when the transaction @@ -186,21 +187,23 @@ public ApiFuture partitionQueryAsync( // statement in the transaction is to partition a query. // Using a batch-read-only transaction for every read-only transaction is not efficient, as // these transactions use a session that is created synchronously only for this transaction. 
- if (transaction == null) { - batchReadOnlyTransaction = batchClient.batchReadOnlyTransaction(readOnlyStaleness); - transaction = batchReadOnlyTransaction; - } else if (batchReadOnlyTransaction == null) { - batchReadOnlyTransaction = - batchClient.batchReadOnlyTransaction( - TimestampBound.ofReadTimestamp(transaction.getReadTimestamp())); + try (Scope ignore = span.makeCurrent()) { + if (transaction == null) { + batchReadOnlyTransaction = batchClient.batchReadOnlyTransaction(readOnlyStaleness); + transaction = batchReadOnlyTransaction; + } else if (batchReadOnlyTransaction == null) { + batchReadOnlyTransaction = + batchClient.batchReadOnlyTransaction( + TimestampBound.ofReadTimestamp(transaction.getReadTimestamp())); + } + Callable callable = + () -> partitionQuery(batchReadOnlyTransaction, partitionOptions, query, options); + return executeStatementAsync( + callType, + query, + callable, + ImmutableList.of(SpannerGrpc.getExecuteSqlMethod(), SpannerGrpc.getCommitMethod())); } - Callable callable = - () -> partitionQuery(batchReadOnlyTransaction, partitionOptions, query, options); - return executeStatementAsync( - callType, - query, - callable, - ImmutableList.of(SpannerGrpc.getExecuteSqlMethod(), SpannerGrpc.getCommitMethod())); } @Override @@ -239,26 +242,41 @@ public ApiFuture writeAsync(CallType callType, Iterable mutation } @Override - public ApiFuture commitAsync(CallType callType) { - closeTransactions(); - this.state = UnitOfWorkState.COMMITTED; - return ApiFutures.immediateFuture(null); + public ApiFuture commitAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { + try (Scope ignore = span.makeCurrent()) { + ApiFuture result = closeTransactions(); + callback.onSuccess(); + this.state = UnitOfWorkState.COMMITTED; + return result; + } catch (Throwable throwable) { + callback.onFailure(); + throw throwable; + } } @Override - public ApiFuture rollbackAsync(CallType callType) { - closeTransactions(); - this.state = 
UnitOfWorkState.ROLLED_BACK; - return ApiFutures.immediateFuture(null); + public ApiFuture rollbackAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { + try (Scope ignore = span.makeCurrent()) { + ApiFuture result = closeTransactions(); + callback.onSuccess(); + this.state = UnitOfWorkState.ROLLED_BACK; + return result; + } catch (Throwable throwable) { + callback.onFailure(); + throw throwable; + } } - private void closeTransactions() { + private ApiFuture closeTransactions() { if (this.transaction != null) { this.transaction.close(); } if (this.batchReadOnlyTransaction != null) { this.batchReadOnlyTransaction.close(); } + return asyncEndUnitOfWorkSpan(); } @Override diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadWriteTransaction.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadWriteTransaction.java index 86d6feff90e..6a83142b175 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadWriteTransaction.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/ReadWriteTransaction.java @@ -21,6 +21,7 @@ import static com.google.cloud.spanner.connection.AbstractStatementParser.COMMIT_STATEMENT; import static com.google.cloud.spanner.connection.AbstractStatementParser.ROLLBACK_STATEMENT; import static com.google.cloud.spanner.connection.AbstractStatementParser.RUN_BATCH_STATEMENT; +import static com.google.cloud.spanner.connection.ConnectionOptions.tryParseLong; import static com.google.common.base.Preconditions.checkNotNull; import com.google.api.core.ApiFuture; @@ -33,6 +34,7 @@ import com.google.cloud.spanner.AbortedException; import com.google.cloud.spanner.CommitResponse; import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Dialect; import com.google.cloud.spanner.ErrorCode; import com.google.cloud.spanner.Mutation; import com.google.cloud.spanner.Options; @@ -45,6 +47,7 @@ 
import com.google.cloud.spanner.SpannerException; import com.google.cloud.spanner.SpannerExceptionFactory; import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.ThreadFactoryUtil; import com.google.cloud.spanner.TransactionContext; import com.google.cloud.spanner.TransactionManager; import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; @@ -57,17 +60,25 @@ import com.google.common.collect.Iterables; import com.google.common.util.concurrent.MoreExecutors; import com.google.spanner.v1.SpannerGrpc; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.context.Scope; import java.time.Duration; import java.util.ArrayList; import java.util.LinkedList; import java.util.List; import java.util.Objects; import java.util.concurrent.Callable; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; import java.util.logging.Level; import java.util.logging.Logger; +import javax.annotation.Nonnull; /** * Transaction that is used when a {@link Connection} is normal read/write mode (i.e. not autocommit @@ -79,7 +90,19 @@ * exact same results as the original transaction. 
*/ class ReadWriteTransaction extends AbstractMultiUseTransaction { + private static final AttributeKey TRANSACTION_RETRIED = + AttributeKey.booleanKey("transaction.retried"); private static final Logger logger = Logger.getLogger(ReadWriteTransaction.class.getName()); + private static final ThreadFactory KEEP_ALIVE_THREAD_FACTORY = + ThreadFactoryUtil.createVirtualOrPlatformDaemonThreadFactory( + "read-write-transaction-keep-alive", true); + private static final ScheduledExecutorService KEEP_ALIVE_SERVICE = + Executors.newSingleThreadScheduledExecutor(KEEP_ALIVE_THREAD_FACTORY); + private static final ParsedStatement SELECT1_STATEMENT = + AbstractStatementParser.getInstance(Dialect.GOOGLE_STANDARD_SQL) + .parse(Statement.of("SELECT 1")); + private static final long DEFAULT_KEEP_ALIVE_INTERVAL_MILLIS = 8000L; + private static final AtomicLong ID_GENERATOR = new AtomicLong(); private static final String MAX_INTERNAL_RETRIES_EXCEEDED = "Internal transaction retry maximum exceeded"; @@ -99,6 +122,8 @@ class ReadWriteTransaction extends AbstractMultiUseTransaction { */ private static final String AUTO_SAVEPOINT_NAME = "_auto_savepoint"; + private final boolean usesEmulator; + /** * Indicates whether an automatic savepoint should be generated after each statement, so the * transaction can be manually aborted and retried by the Connection API when connected to the @@ -122,6 +147,9 @@ class ReadWriteTransaction extends AbstractMultiUseTransaction { private TransactionManager txManager; private final boolean retryAbortsInternally; private final boolean delayTransactionStartUntilFirstWrite; + private final boolean keepTransactionAlive; + private final long keepAliveIntervalMillis; + private final ReentrantLock keepAliveLock; private final SavepointSupport savepointSupport; private int transactionRetryAttempts; private int successfulRetries; @@ -136,6 +164,7 @@ class ReadWriteTransaction extends AbstractMultiUseTransaction { private final List statements = new ArrayList<>(); 
private final List mutations = new ArrayList<>(); private Timestamp transactionStarted; + private ScheduledFuture keepAliveFuture; private static final class RollbackToSavepointException extends Exception { private final Savepoint savepoint; @@ -149,11 +178,28 @@ Savepoint getSavepoint() { } } + private final class StatementResultCallback implements ApiFutureCallback { + @Override + public void onFailure(Throwable t) { + if (t instanceof SpannerException) { + handlePossibleInvalidatingException((SpannerException) t); + } + maybeScheduleKeepAlivePing(); + } + + @Override + public void onSuccess(V result) { + maybeScheduleKeepAlivePing(); + } + } + static class Builder extends AbstractMultiUseTransaction.Builder { + private boolean usesEmulator; private boolean useAutoSavepointsForEmulator; private DatabaseClient dbClient; private Boolean retryAbortsInternally; private boolean delayTransactionStartUntilFirstWrite; + private boolean keepTransactionAlive; private boolean returnCommitStats; private Duration maxCommitDelay; private SavepointSupport savepointSupport; @@ -161,6 +207,11 @@ static class Builder extends AbstractMultiUseTransaction.Builder 0 + ? keepAliveIntervalMillis + : DEFAULT_KEEP_ALIVE_INTERVAL_MILLIS, + TimeUnit.MILLISECONDS); + } + } finally { + keepAliveLock.unlock(); + } + } + } + + private void cancelScheduledKeepAlivePing() { + if (keepAliveLock != null) { + keepAliveLock.lock(); + try { + if (keepAliveFuture != null) { + keepAliveFuture.cancel(false); + } + } finally { + keepAliveLock.unlock(); + } + } + } + + private class KeepAliveRunnable implements Runnable { + @Override + public void run() { + if (shouldPing()) { + // Do a shoot-and-forget ping and schedule a new ping over 8 seconds after this ping has + // finished. 
+ ApiFuture future = + executeQueryAsync( + CallType.SYNC, + SELECT1_STATEMENT, + AnalyzeMode.NONE, + Options.tag( + System.getProperty( + "spanner.connection.keep_alive_query_tag", + "connection.transaction-keep-alive"))); + future.addListener( + ReadWriteTransaction.this::maybeScheduleKeepAlivePing, MoreExecutors.directExecutor()); + } + } + } + private void checkTimedOut() { ConnectionPreconditions.checkState( !timedOutOrCancelled, @@ -484,76 +617,69 @@ public ApiFuture executeQueryAsync( (statement.getType() == StatementType.QUERY) || (statement.getType() == StatementType.UPDATE && statement.hasReturningClause()), "Statement must be a query or DML with returning clause"); - checkOrCreateValidTransaction(statement, callType); - - ApiFuture res; - if (retryAbortsInternally && txContextFuture != null) { - res = - executeStatementAsync( - callType, - statement, - () -> { - checkTimedOut(); - return runWithRetry( - () -> { - try { - getStatementExecutor() - .invokeInterceptors( - statement, - StatementExecutionStep.EXECUTE_STATEMENT, - ReadWriteTransaction.this); - DirectExecuteResultSet delegate = - DirectExecuteResultSet.ofResultSet( - internalExecuteQuery(statement, analyzeMode, options)); - return createAndAddRetryResultSet( - delegate, statement, analyzeMode, options); - } catch (AbortedException e) { - throw e; - } catch (SpannerException e) { - createAndAddFailedQuery(e, statement, analyzeMode, options); - throw e; - } - }); - }, - // ignore interceptors here as they are invoked in the Callable. 
- InterceptorsUsage.IGNORE_INTERCEPTORS, - ImmutableList.of(SpannerGrpc.getExecuteStreamingSqlMethod())); - } else { - res = super.executeQueryAsync(callType, statement, analyzeMode, options); + try (Scope ignore = span.makeCurrent()) { + checkOrCreateValidTransaction(statement, callType); + + ApiFuture res; + if (retryAbortsInternally && txContextFuture != null) { + res = + executeStatementAsync( + callType, + statement, + () -> { + checkTimedOut(); + return runWithRetry( + () -> { + try { + getStatementExecutor() + .invokeInterceptors( + statement, + StatementExecutionStep.EXECUTE_STATEMENT, + ReadWriteTransaction.this); + DirectExecuteResultSet delegate = + DirectExecuteResultSet.ofResultSet( + internalExecuteQuery(statement, analyzeMode, options)); + return createAndAddRetryResultSet( + delegate, statement, analyzeMode, options); + } catch (AbortedException e) { + throw e; + } catch (SpannerException e) { + createAndAddFailedQuery(e, statement, analyzeMode, options); + throw e; + } + }); + }, + // ignore interceptors here as they are invoked in the Callable. + InterceptorsUsage.IGNORE_INTERCEPTORS, + ImmutableList.of(SpannerGrpc.getExecuteStreamingSqlMethod())); + } else { + res = super.executeQueryAsync(callType, statement, analyzeMode, options); + } + ApiFutures.addCallback(res, new StatementResultCallback<>(), MoreExecutors.directExecutor()); + return res; } - ApiFutures.addCallback( - res, - new ApiFutureCallback() { - @Override - public void onFailure(Throwable t) { - if (t instanceof SpannerException) { - handlePossibleInvalidatingException((SpannerException) t); - } - } - - @Override - public void onSuccess(ResultSet result) {} - }, - MoreExecutors.directExecutor()); - return res; } @Override public ApiFuture analyzeUpdateAsync( CallType callType, ParsedStatement update, AnalyzeMode analyzeMode, UpdateOption... 
options) { - return ApiFutures.transform( - internalExecuteUpdateAsync(callType, update, analyzeMode, options), - Tuple::y, - MoreExecutors.directExecutor()); + try (Scope ignore = span.makeCurrent()) { + return ApiFutures.transform( + internalExecuteUpdateAsync(callType, update, analyzeMode, options), + Tuple::y, + MoreExecutors.directExecutor()); + } } @Override public ApiFuture executeUpdateAsync( CallType callType, final ParsedStatement update, final UpdateOption... options) { - return ApiFutures.transform( - internalExecuteUpdateAsync(callType, update, AnalyzeMode.NONE, options), - Tuple::x, - MoreExecutors.directExecutor()); + try (Scope ignore = span.makeCurrent()) { + return ApiFutures.transform( + internalExecuteUpdateAsync(callType, update, AnalyzeMode.NONE, options), + Tuple::x, + MoreExecutors.directExecutor()); + } } /** @@ -640,20 +766,7 @@ private ApiFuture> internalExecuteUpdateAsync( }, SpannerGrpc.getExecuteSqlMethod()); } - ApiFutures.addCallback( - res, - new ApiFutureCallback>() { - @Override - public void onFailure(Throwable t) { - if (t instanceof SpannerException) { - handlePossibleInvalidatingException((SpannerException) t); - } - } - - @Override - public void onSuccess(Tuple result) {} - }, - MoreExecutors.directExecutor()); + ApiFutures.addCallback(res, new StatementResultCallback<>(), MoreExecutors.directExecutor()); return res; } @@ -661,86 +774,78 @@ public void onSuccess(Tuple result) {} public ApiFuture executeBatchUpdateAsync( CallType callType, Iterable updates, final UpdateOption... 
options) { Preconditions.checkNotNull(updates); - final List updateStatements = new LinkedList<>(); - for (ParsedStatement update : updates) { - Preconditions.checkArgument( - update.isUpdate(), - "Statement is not an update statement: " + update.getSqlWithoutComments()); - updateStatements.add(update.getStatement()); - } - checkOrCreateValidTransaction(Iterables.getFirst(updates, null), callType); - - ApiFuture res; - if (retryAbortsInternally) { - res = - executeStatementAsync( - callType, - RUN_BATCH_STATEMENT, - () -> { - checkTimedOut(); - return runWithRetry( - () -> { - try { - getStatementExecutor() - .invokeInterceptors( - RUN_BATCH_STATEMENT, - StatementExecutionStep.EXECUTE_STATEMENT, - ReadWriteTransaction.this); - long[] updateCounts = - get(txContextFuture).batchUpdate(updateStatements, options); - createAndAddRetriableBatchUpdate(updateStatements, updateCounts, options); - return updateCounts; - } catch (AbortedException e) { - throw e; - } catch (SpannerException e) { - createAndAddFailedBatchUpdate(e, updateStatements); - throw e; - } - }); - }, - // ignore interceptors here as they are invoked in the Callable. 
- InterceptorsUsage.IGNORE_INTERCEPTORS, - ImmutableList.of(SpannerGrpc.getExecuteBatchDmlMethod())); - } else { - res = - executeStatementAsync( - callType, - RUN_BATCH_STATEMENT, - () -> { - checkTimedOut(); - checkAborted(); - return get(txContextFuture).batchUpdate(updateStatements); - }, - SpannerGrpc.getExecuteBatchDmlMethod()); + try (Scope ignore = span.makeCurrent()) { + final List updateStatements = new LinkedList<>(); + for (ParsedStatement update : updates) { + Preconditions.checkArgument( + update.isUpdate(), + "Statement is not an update statement: " + update.getSqlWithoutComments()); + updateStatements.add(update.getStatement()); + } + checkOrCreateValidTransaction(Iterables.getFirst(updates, null), callType); + + ApiFuture res; + if (retryAbortsInternally) { + res = + executeStatementAsync( + callType, + RUN_BATCH_STATEMENT, + () -> { + checkTimedOut(); + return runWithRetry( + () -> { + try { + getStatementExecutor() + .invokeInterceptors( + RUN_BATCH_STATEMENT, + StatementExecutionStep.EXECUTE_STATEMENT, + ReadWriteTransaction.this); + long[] updateCounts = + get(txContextFuture).batchUpdate(updateStatements, options); + createAndAddRetriableBatchUpdate(updateStatements, updateCounts, options); + return updateCounts; + } catch (AbortedException e) { + throw e; + } catch (SpannerException e) { + createAndAddFailedBatchUpdate(e, updateStatements); + throw e; + } + }); + }, + // ignore interceptors here as they are invoked in the Callable. 
+ InterceptorsUsage.IGNORE_INTERCEPTORS, + ImmutableList.of(SpannerGrpc.getExecuteBatchDmlMethod())); + } else { + res = + executeStatementAsync( + callType, + RUN_BATCH_STATEMENT, + () -> { + checkTimedOut(); + checkAborted(); + return get(txContextFuture).batchUpdate(updateStatements); + }, + SpannerGrpc.getExecuteBatchDmlMethod()); + } + ApiFutures.addCallback(res, new StatementResultCallback<>(), MoreExecutors.directExecutor()); + return res; } - ApiFutures.addCallback( - res, - new ApiFutureCallback() { - @Override - public void onFailure(Throwable t) { - if (t instanceof SpannerException) { - handlePossibleInvalidatingException((SpannerException) t); - } - } - - @Override - public void onSuccess(long[] result) {} - }, - MoreExecutors.directExecutor()); - return res; } @Override public ApiFuture writeAsync(CallType callType, Iterable mutations) { - Preconditions.checkNotNull(mutations); - // We actually don't need an underlying transaction yet, as mutations are buffered until commit. - // But we do need to verify that this transaction is valid, and to mark the start of the - // transaction. - checkValidStateAndMarkStarted(); - for (Mutation mutation : mutations) { - this.mutations.add(checkNotNull(mutation)); + try (Scope ignore = span.makeCurrent()) { + Preconditions.checkNotNull(mutations); + // We actually don't need an underlying transaction yet, as mutations are buffered until + // commit. + // But we do need to verify that this transaction is valid, and to mark the start of the + // transaction. 
+ checkValidStateAndMarkStarted(); + for (Mutation mutation : mutations) { + this.mutations.add(checkNotNull(mutation)); + } + return ApiFutures.immediateFuture(null); } - return ApiFutures.immediateFuture(null); } private final Callable commitCallable = @@ -757,74 +862,87 @@ public Void call() { }; @Override - public ApiFuture commitAsync(CallType callType) { - checkOrCreateValidTransaction(COMMIT_STATEMENT, callType); - state = UnitOfWorkState.COMMITTING; - commitResponseFuture = SettableApiFuture.create(); - ApiFuture res; - // Check if this transaction actually needs to commit anything. - if (txContextFuture == null) { - // No actual transaction was started by this read/write transaction, which also means that we - // don't have to commit anything. - commitResponseFuture.set( - new CommitResponse( - Timestamp.fromProto(com.google.protobuf.Timestamp.getDefaultInstance()))); - state = UnitOfWorkState.COMMITTED; - res = SettableApiFuture.create(); - ((SettableApiFuture) res).set(null); - } else if (retryAbortsInternally) { - res = - executeStatementAsync( - callType, - COMMIT_STATEMENT, - () -> { - checkTimedOut(); - try { - return runWithRetry( - () -> { - getStatementExecutor() - .invokeInterceptors( - COMMIT_STATEMENT, - StatementExecutionStep.EXECUTE_STATEMENT, - ReadWriteTransaction.this); - return commitCallable.call(); - }); - } catch (Throwable t) { - commitResponseFuture.setException(t); - state = UnitOfWorkState.COMMIT_FAILED; + public ApiFuture commitAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { + try (Scope ignore = span.makeCurrent()) { + checkOrCreateValidTransaction(COMMIT_STATEMENT, callType); + cancelScheduledKeepAlivePing(); + state = UnitOfWorkState.COMMITTING; + commitResponseFuture = SettableApiFuture.create(); + ApiFuture res; + // Check if this transaction actually needs to commit anything. 
+ if (txContextFuture == null) { + // No actual transaction was started by this read/write transaction, which also means that + // we don't have to commit anything. + commitResponseFuture.set( + new CommitResponse( + Timestamp.fromProto(com.google.protobuf.Timestamp.getDefaultInstance()))); + callback.onSuccess(); + state = UnitOfWorkState.COMMITTED; + res = SettableApiFuture.create(); + ((SettableApiFuture) res).set(null); + } else if (retryAbortsInternally) { + res = + executeStatementAsync( + callType, + COMMIT_STATEMENT, + () -> { + checkTimedOut(); try { - txManager.close(); - } catch (Throwable t2) { - // Ignore. + Void result = + runWithRetry( + () -> { + getStatementExecutor() + .invokeInterceptors( + COMMIT_STATEMENT, + StatementExecutionStep.EXECUTE_STATEMENT, + ReadWriteTransaction.this); + return commitCallable.call(); + }); + callback.onSuccess(); + return result; + } catch (Throwable t) { + commitResponseFuture.setException(t); + callback.onFailure(); + state = UnitOfWorkState.COMMIT_FAILED; + try { + txManager.close(); + } catch (Throwable t2) { + // Ignore. + } + throw t; } - throw t; - } - }, - InterceptorsUsage.IGNORE_INTERCEPTORS, - ImmutableList.of(SpannerGrpc.getCommitMethod())); - } else { - res = - executeStatementAsync( - callType, - COMMIT_STATEMENT, - () -> { - checkTimedOut(); - try { - return commitCallable.call(); - } catch (Throwable t) { - commitResponseFuture.setException(t); - state = UnitOfWorkState.COMMIT_FAILED; + }, + InterceptorsUsage.IGNORE_INTERCEPTORS, + ImmutableList.of(SpannerGrpc.getCommitMethod())); + } else { + res = + executeStatementAsync( + callType, + COMMIT_STATEMENT, + () -> { + checkTimedOut(); try { - txManager.close(); - } catch (Throwable t2) { - // Ignore. 
+ Void result = commitCallable.call(); + callback.onSuccess(); + return result; + } catch (Throwable t) { + commitResponseFuture.setException(t); + callback.onFailure(); + state = UnitOfWorkState.COMMIT_FAILED; + try { + txManager.close(); + } catch (Throwable t2) { + // Ignore. + } + throw t; } - throw t; - } - }, - SpannerGrpc.getCommitMethod()); + }, + SpannerGrpc.getCommitMethod()); + } + asyncEndUnitOfWorkSpan(); + return res; } - return res; } /** @@ -959,19 +1077,23 @@ private void addRetryStatement(RetriableStatement statement) { private void handleAborted(AbortedException aborted) { if (transactionRetryAttempts >= maxInternalRetries) { // If the same statement in transaction keeps aborting, then we need to abort here. + span.addEvent("Internal retry attempts exceeded"); throwAbortWithRetryAttemptsExceeded(); } else if (retryAbortsInternally) { logger.fine(toString() + ": Starting internal transaction retry"); while (true) { // First back off and then restart the transaction. long delay = aborted.getRetryDelayInMillis(); + span.addEvent( + "Transaction aborted. 
Backing off for " + delay + " milliseconds and retrying."); + span.setAttribute(TRANSACTION_RETRIED, true); try { if (delay > 0L) { //noinspection BusyWait Thread.sleep(delay); } else if (aborted.isEmulatorOnlySupportsOneTransactionException()) { //noinspection BusyWait - Thread.sleep(ThreadLocalRandom.current().nextInt(50)); + Thread.sleep(ThreadLocalRandom.current().nextInt(1, 5)); } } catch (InterruptedException ie) { Thread.currentThread().interrupt(); @@ -1107,20 +1229,39 @@ public Void call() { }; @Override - public ApiFuture rollbackAsync(CallType callType) { - return rollbackAsync(callType, true); + public ApiFuture rollbackAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { + try (Scope ignore = span.makeCurrent()) { + callback.onSuccess(); + return rollbackAsync(callType, true); + } catch (Throwable throwable) { + callback.onFailure(); + throw throwable; + } } - private ApiFuture rollbackAsync(CallType callType, boolean updateStatus) { + private ApiFuture rollbackAsync(CallType callType, boolean updateStatusAndEndSpan) { ConnectionPreconditions.checkState( state == UnitOfWorkState.STARTED || state == UnitOfWorkState.ABORTED, "This transaction has status " + state.name()); - if (updateStatus) { + cancelScheduledKeepAlivePing(); + if (updateStatusAndEndSpan) { state = UnitOfWorkState.ROLLED_BACK; } if (txContextFuture != null && state != UnitOfWorkState.ABORTED) { - return executeStatementAsync( - callType, ROLLBACK_STATEMENT, rollbackCallable, SpannerGrpc.getRollbackMethod()); + ApiFuture result = + executeStatementAsync( + callType, ROLLBACK_STATEMENT, rollbackCallable, SpannerGrpc.getRollbackMethod()); + if (updateStatusAndEndSpan) { + // Note: We end the transaction span after executing the rollback to include the rollback in + // the transaction span. 
Even though both methods are executed asynchronously, they are both + // executed using the same single-threaded executor, meaning that the span will only be + // ended after the rollback has finished. + asyncEndUnitOfWorkSpan(); + } + return result; + } else if (updateStatusAndEndSpan) { + return asyncEndUnitOfWorkSpan(); } else { return ApiFutures.immediateFuture(null); } @@ -1168,18 +1309,20 @@ private Savepoint createAutoSavepoint() { @Override void rollbackToSavepoint(Savepoint savepoint) { - get(rollbackAsync(CallType.SYNC, false)); - // Mark the state of the transaction as rolled back to a savepoint. This will ensure that the - // transaction will retry the next time a statement is actually executed. - this.rolledBackToSavepointException = - (AbortedException) - SpannerExceptionFactory.newSpannerException( - ErrorCode.ABORTED, - "Transaction has been rolled back to a savepoint", - new RollbackToSavepointException(savepoint)); - // Clear all statements and mutations after the savepoint. - this.statements.subList(savepoint.getStatementPosition(), this.statements.size()).clear(); - this.mutations.subList(savepoint.getMutationPosition(), this.mutations.size()).clear(); + try (Scope ignore = span.makeCurrent()) { + get(rollbackAsync(CallType.SYNC, false)); + // Mark the state of the transaction as rolled back to a savepoint. This will ensure that the + // transaction will retry the next time a statement is actually executed. + this.rolledBackToSavepointException = + (AbortedException) + SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, + "Transaction has been rolled back to a savepoint", + new RollbackToSavepointException(savepoint)); + // Clear all statements and mutations after the savepoint. 
+ this.statements.subList(savepoint.getStatementPosition(), this.statements.size()).clear(); + this.mutations.subList(savepoint.getMutationPosition(), this.mutations.size()).clear(); + } } /** diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SingleUseTransaction.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SingleUseTransaction.java index 164c3ae7ada..53a1bb03b10 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SingleUseTransaction.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SingleUseTransaction.java @@ -53,8 +53,11 @@ import com.google.common.util.concurrent.MoreExecutors; import com.google.spanner.admin.database.v1.DatabaseAdminGrpc; import com.google.spanner.v1.SpannerGrpc; +import io.opentelemetry.context.Scope; import java.time.Duration; +import java.util.Arrays; import java.util.concurrent.Callable; +import javax.annotation.Nonnull; /** * Transaction that is used when a {@link Connection} is in autocommit mode. 
Each method on this @@ -82,6 +85,7 @@ class SingleUseTransaction extends AbstractBaseUnitOfWork { private final boolean returnCommitStats; private final Duration maxCommitDelay; private final boolean internalMetdataQuery; + private final byte[] protoDescriptors; private volatile SettableApiFuture readTimestamp = null; private volatile TransactionRunner writeTransaction; private boolean used = false; @@ -97,6 +101,7 @@ static class Builder extends AbstractBaseUnitOfWork.Builder executeQueryAsync( || (statement.isUpdate() && (analyzeMode != AnalyzeMode.NONE || statement.hasReturningClause())), "The statement must be a query, or the statement must be DML and AnalyzeMode must be PLAN or PROFILE"); - checkAndMarkUsed(); - - if (statement.isUpdate()) { - if (analyzeMode != AnalyzeMode.NONE) { - return analyzeTransactionalUpdateAsync(callType, statement, analyzeMode); + try (Scope ignore = span.makeCurrent()) { + checkAndMarkUsed(); + + if (statement.isUpdate()) { + if (analyzeMode != AnalyzeMode.NONE) { + return analyzeTransactionalUpdateAsync(callType, statement, analyzeMode); + } + // DML with returning clause. + return executeDmlReturningAsync(callType, statement, options); } - // DML with returning clause. - return executeDmlReturningAsync(callType, statement, options); - } - // Do not use a read-only staleness for internal metadata queries. - final ReadOnlyTransaction currentTransaction = - internalMetdataQuery - ? dbClient.singleUseReadOnlyTransaction() - : dbClient.singleUseReadOnlyTransaction(readOnlyStaleness); - Callable callable = - () -> { - try { - ResultSet rs; - if (analyzeMode == AnalyzeMode.NONE) { - rs = currentTransaction.executeQuery(statement.getStatement(), options); - } else { - rs = - currentTransaction.analyzeQuery( - statement.getStatement(), analyzeMode.getQueryAnalyzeMode()); + // Do not use a read-only staleness for internal metadata queries. + final ReadOnlyTransaction currentTransaction = + internalMetdataQuery + ? 
dbClient.singleUseReadOnlyTransaction() + : dbClient.singleUseReadOnlyTransaction(readOnlyStaleness); + Callable callable = + () -> { + try { + ResultSet rs; + if (analyzeMode == AnalyzeMode.NONE) { + rs = currentTransaction.executeQuery(statement.getStatement(), options); + } else { + rs = + currentTransaction.analyzeQuery( + statement.getStatement(), analyzeMode.getQueryAnalyzeMode()); + } + // Return a DirectExecuteResultSet, which will directly do a next() call in order to + // ensure that the query is actually sent to Spanner. + ResultSet directRs = DirectExecuteResultSet.ofResultSet(rs); + state = UnitOfWorkState.COMMITTED; + readTimestamp.set(currentTransaction.getReadTimestamp()); + return directRs; + } catch (Throwable t) { + state = UnitOfWorkState.COMMIT_FAILED; + readTimestamp.set(null); + currentTransaction.close(); + throw t; } - // Return a DirectExecuteResultSet, which will directly do a next() call in order to - // ensure that the query is actually sent to Spanner. - ResultSet directRs = DirectExecuteResultSet.ofResultSet(rs); - state = UnitOfWorkState.COMMITTED; - readTimestamp.set(currentTransaction.getReadTimestamp()); - return directRs; - } catch (Throwable t) { - state = UnitOfWorkState.COMMIT_FAILED; - readTimestamp.set(null); - currentTransaction.close(); - throw t; - } - }; - readTimestamp = SettableApiFuture.create(); - return executeStatementAsync( - callType, statement, callable, SpannerGrpc.getExecuteStreamingSqlMethod()); + }; + readTimestamp = SettableApiFuture.create(); + return executeStatementAsync( + callType, statement, callable, SpannerGrpc.getExecuteStreamingSqlMethod()); + } } private ApiFuture executeDmlReturningAsync( @@ -295,26 +313,28 @@ public ApiFuture partitionQueryAsync( ParsedStatement query, PartitionOptions partitionOptions, QueryOption... 
options) { - Callable callable = - () -> { - try (BatchReadOnlyTransaction transaction = - batchClient.batchReadOnlyTransaction(readOnlyStaleness)) { - ResultSet resultSet = partitionQuery(transaction, partitionOptions, query, options); - readTimestamp.set(transaction.getReadTimestamp()); - state = UnitOfWorkState.COMMITTED; - return resultSet; - } catch (Throwable throwable) { - state = UnitOfWorkState.COMMIT_FAILED; - readTimestamp.set(null); - throw throwable; - } - }; - readTimestamp = SettableApiFuture.create(); - return executeStatementAsync( - callType, - query, - callable, - ImmutableList.of(SpannerGrpc.getExecuteSqlMethod(), SpannerGrpc.getCommitMethod())); + try (Scope ignore = span.makeCurrent()) { + Callable callable = + () -> { + try (BatchReadOnlyTransaction transaction = + batchClient.batchReadOnlyTransaction(readOnlyStaleness)) { + ResultSet resultSet = partitionQuery(transaction, partitionOptions, query, options); + readTimestamp.set(transaction.getReadTimestamp()); + state = UnitOfWorkState.COMMITTED; + return resultSet; + } catch (Throwable throwable) { + state = UnitOfWorkState.COMMIT_FAILED; + readTimestamp.set(null); + throw throwable; + } + }; + readTimestamp = SettableApiFuture.create(); + return executeStatementAsync( + callType, + query, + callable, + ImmutableList.of(SpannerGrpc.getExecuteSqlMethod(), SpannerGrpc.getCommitMethod())); + } } @Override @@ -373,29 +393,32 @@ public ApiFuture executeDdlAsync(CallType callType, final ParsedStatement ddl.getType() == StatementType.DDL, "Statement is not a ddl statement"); ConnectionPreconditions.checkState( !isReadOnly(), "DDL statements are not allowed in read-only mode"); - checkAndMarkUsed(); - - Callable callable = - () -> { - try { - OperationFuture operation; - if (DdlClient.isCreateDatabaseStatement(ddl.getSqlWithoutComments())) { - operation = - ddlClient.executeCreateDatabase( - ddl.getSqlWithoutComments(), dbClient.getDialect()); - } else { - operation = 
ddlClient.executeDdl(ddl.getSqlWithoutComments()); + try (Scope ignore = span.makeCurrent()) { + checkAndMarkUsed(); + span.setAttribute(DB_STATEMENT_KEY, ddl.getStatement().getSql()); + + Callable callable = + () -> { + try { + OperationFuture operation; + if (DdlClient.isCreateDatabaseStatement(ddl.getSqlWithoutComments())) { + operation = + ddlClient.executeCreateDatabase( + ddl.getSqlWithoutComments(), dbClient.getDialect()); + } else { + operation = ddlClient.executeDdl(ddl.getSqlWithoutComments(), protoDescriptors); + } + getWithStatementTimeout(operation, ddl); + state = UnitOfWorkState.COMMITTED; + return null; + } catch (Throwable t) { + state = UnitOfWorkState.COMMIT_FAILED; + throw t; } - getWithStatementTimeout(operation, ddl); - state = UnitOfWorkState.COMMITTED; - return null; - } catch (Throwable t) { - state = UnitOfWorkState.COMMIT_FAILED; - throw t; - } - }; - return executeStatementAsync( - callType, ddl, callable, DatabaseAdminGrpc.getUpdateDatabaseDdlMethod()); + }; + return executeStatementAsync( + callType, ddl, callable, DatabaseAdminGrpc.getUpdateDatabaseDdlMethod()); + } } @Override @@ -405,25 +428,27 @@ public ApiFuture executeUpdateAsync( Preconditions.checkArgument(update.isUpdate(), "Statement is not an update statement"); ConnectionPreconditions.checkState( !isReadOnly(), "Update statements are not allowed in read-only mode"); - checkAndMarkUsed(); - - ApiFuture res; - switch (autocommitDmlMode) { - case TRANSACTIONAL: - res = - ApiFutures.transform( - executeTransactionalUpdateAsync(callType, update, AnalyzeMode.NONE, options), - Tuple::x, - MoreExecutors.directExecutor()); - break; - case PARTITIONED_NON_ATOMIC: - res = executePartitionedUpdateAsync(callType, update, options); - break; - default: - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.FAILED_PRECONDITION, "Unknown dml mode: " + autocommitDmlMode); + try (Scope ignore = span.makeCurrent()) { + checkAndMarkUsed(); + + ApiFuture res; + switch 
(autocommitDmlMode) { + case TRANSACTIONAL: + res = + ApiFutures.transform( + executeTransactionalUpdateAsync(callType, update, AnalyzeMode.NONE, options), + Tuple::x, + MoreExecutors.directExecutor()); + break; + case PARTITIONED_NON_ATOMIC: + res = executePartitionedUpdateAsync(callType, update, options); + break; + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Unknown dml mode: " + autocommitDmlMode); + } + return res; } - return res; } @Override @@ -436,12 +461,14 @@ public ApiFuture analyzeUpdateAsync( ConnectionPreconditions.checkState( autocommitDmlMode != AutocommitDmlMode.PARTITIONED_NON_ATOMIC, "Analyzing update statements is not supported for Partitioned DML"); - checkAndMarkUsed(); + try (Scope ignore = span.makeCurrent()) { + checkAndMarkUsed(); - return ApiFutures.transform( - executeTransactionalUpdateAsync(callType, update, analyzeMode, options), - Tuple::y, - MoreExecutors.directExecutor()); + return ApiFutures.transform( + executeTransactionalUpdateAsync(callType, update, analyzeMode, options), + Tuple::y, + MoreExecutors.directExecutor()); + } } @Override @@ -455,17 +482,20 @@ public ApiFuture executeBatchUpdateAsync( } ConnectionPreconditions.checkState( !isReadOnly(), "Batch update statements are not allowed in read-only mode"); - checkAndMarkUsed(); - - switch (autocommitDmlMode) { - case TRANSACTIONAL: - return executeTransactionalBatchUpdateAsync(callType, updates, options); - case PARTITIONED_NON_ATOMIC: - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.FAILED_PRECONDITION, "Batch updates are not allowed in " + autocommitDmlMode); - default: - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.FAILED_PRECONDITION, "Unknown dml mode: " + autocommitDmlMode); + try (Scope ignore = span.makeCurrent()) { + checkAndMarkUsed(); + + switch (autocommitDmlMode) { + case TRANSACTIONAL: + return executeTransactionalBatchUpdateAsync(callType, updates, options); + case 
PARTITIONED_NON_ATOMIC: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, + "Batch updates are not allowed in " + autocommitDmlMode); + default: + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "Unknown dml mode: " + autocommitDmlMode); + } } } @@ -477,6 +507,9 @@ private TransactionRunner createWriteTransaction() { if (returnCommitStats) { numOptions++; } + if (excludeTxnFromChangeStreams) { + numOptions++; + } if (maxCommitDelay != null) { numOptions++; } @@ -491,6 +524,9 @@ private TransactionRunner createWriteTransaction() { if (returnCommitStats) { options[index++] = Options.commitStats(); } + if (excludeTxnFromChangeStreams) { + options[index++] = Options.excludeTxnFromChangeStreams(); + } if (maxCommitDelay != null) { options[index++] = Options.maxCommitDelay(maxCommitDelay); } @@ -560,10 +596,21 @@ private ApiFuture analyzeTransactionalUpdateAsync( private ApiFuture executePartitionedUpdateAsync( CallType callType, final ParsedStatement update, final UpdateOption... 
options) { + final UpdateOption[] effectiveOptions; + if (excludeTxnFromChangeStreams) { + if (options.length == 0) { + effectiveOptions = new UpdateOption[] {Options.excludeTxnFromChangeStreams()}; + } else { + effectiveOptions = Arrays.copyOf(options, options.length + 1); + effectiveOptions[effectiveOptions.length - 1] = Options.excludeTxnFromChangeStreams(); + } + } else { + effectiveOptions = options; + } Callable callable = () -> { try { - Long res = dbClient.executePartitionedUpdate(update.getStatement(), options); + Long res = dbClient.executePartitionedUpdate(update.getStatement(), effectiveOptions); state = UnitOfWorkState.COMMITTED; return res; } catch (Throwable t) { @@ -610,37 +657,41 @@ public ApiFuture writeAsync(CallType callType, final Iterable mu Preconditions.checkNotNull(mutations); ConnectionPreconditions.checkState( !isReadOnly(), "Update statements are not allowed in read-only mode"); - checkAndMarkUsed(); - - Callable callable = - () -> { - try { - writeTransaction = createWriteTransaction(); - Void res = - writeTransaction.run( - transaction -> { - transaction.buffer(mutations); - return null; - }); - state = UnitOfWorkState.COMMITTED; - return res; - } catch (Throwable t) { - state = UnitOfWorkState.COMMIT_FAILED; - throw t; - } - }; - return executeStatementAsync( - callType, COMMIT_STATEMENT, callable, SpannerGrpc.getCommitMethod()); + try (Scope ignore = span.makeCurrent()) { + checkAndMarkUsed(); + + Callable callable = + () -> { + try { + writeTransaction = createWriteTransaction(); + Void res = + writeTransaction.run( + transaction -> { + transaction.buffer(mutations); + return null; + }); + state = UnitOfWorkState.COMMITTED; + return res; + } catch (Throwable t) { + state = UnitOfWorkState.COMMIT_FAILED; + throw t; + } + }; + return executeStatementAsync( + callType, COMMIT_STATEMENT, callable, SpannerGrpc.getCommitMethod()); + } } @Override - public ApiFuture commitAsync(CallType callType) { + public ApiFuture commitAsync( + 
@Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { throw SpannerExceptionFactory.newSpannerException( ErrorCode.FAILED_PRECONDITION, "Commit is not supported for single-use transactions"); } @Override - public ApiFuture rollbackAsync(CallType callType) { + public ApiFuture rollbackAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback) { throw SpannerExceptionFactory.newSpannerException( ErrorCode.FAILED_PRECONDITION, "Rollback is not supported for single-use transactions"); } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SpannerPool.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SpannerPool.java index b4ac7cf3a35..246d340b070 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SpannerPool.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/SpannerPool.java @@ -158,6 +158,8 @@ static class SpannerPoolKey { private final boolean routeToLeader; private final boolean useVirtualGrpcTransportThreads; private final OpenTelemetry openTelemetry; + private final Boolean enableExtendedTracing; + private final Boolean enableApiTracing; @VisibleForTesting static SpannerPoolKey of(ConnectionOptions options) { @@ -186,6 +188,8 @@ private SpannerPoolKey(ConnectionOptions options) throws IOException { this.routeToLeader = options.isRouteToLeader(); this.useVirtualGrpcTransportThreads = options.isUseVirtualGrpcTransportThreads(); this.openTelemetry = options.getOpenTelemetry(); + this.enableExtendedTracing = options.isEnableExtendedTracing(); + this.enableApiTracing = options.isEnableApiTracing(); } @Override @@ -205,7 +209,9 @@ public boolean equals(Object o) { && Objects.equals(this.routeToLeader, other.routeToLeader) && Objects.equals( this.useVirtualGrpcTransportThreads, other.useVirtualGrpcTransportThreads) - && Objects.equals(this.openTelemetry, other.openTelemetry); + && 
Objects.equals(this.openTelemetry, other.openTelemetry) + && Objects.equals(this.enableExtendedTracing, other.enableExtendedTracing) + && Objects.equals(this.enableApiTracing, other.enableApiTracing); } @Override @@ -221,7 +227,9 @@ public int hashCode() { this.userAgent, this.routeToLeader, this.useVirtualGrpcTransportThreads, - this.openTelemetry); + this.openTelemetry, + this.enableExtendedTracing, + this.enableApiTracing); } } @@ -357,6 +365,12 @@ Spanner createSpanner(SpannerPoolKey key, ConnectionOptions options) { if (key.openTelemetry != null) { builder.setOpenTelemetry(key.openTelemetry); } + if (key.enableExtendedTracing != null) { + builder.setEnableExtendedTracing(key.enableExtendedTracing); + } + if (key.enableApiTracing != null) { + builder.setEnableApiTracing(key.enableApiTracing); + } if (key.numChannels != null) { builder.setNumChannels(key.numChannels); } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementResult.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementResult.java index 77f544a046f..23c5d792a77 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementResult.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementResult.java @@ -75,10 +75,14 @@ enum ClientSideStatementType { SET_MAX_COMMIT_DELAY, SHOW_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE, SET_DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE, + SHOW_KEEP_TRANSACTION_ALIVE, + SET_KEEP_TRANSACTION_ALIVE, SHOW_STATEMENT_TAG, SET_STATEMENT_TAG, SHOW_TRANSACTION_TAG, SET_TRANSACTION_TAG, + SHOW_EXCLUDE_TXN_FROM_CHANGE_STREAMS, + SET_EXCLUDE_TXN_FROM_CHANGE_STREAMS, BEGIN, COMMIT, ROLLBACK, @@ -88,6 +92,7 @@ enum ClientSideStatementType { START_BATCH_DML, RUN_BATCH, ABORT_BATCH, + RESET_ALL, SET_RPC_PRIORITY, SHOW_RPC_PRIORITY, SHOW_TRANSACTION_ISOLATION_LEVEL, @@ -105,6 +110,10 @@ enum ClientSideStatementType { PARTITION, RUN_PARTITION, 
RUN_PARTITIONED_QUERY, + SET_PROTO_DESCRIPTORS, + SET_PROTO_DESCRIPTORS_FILE_PATH, + SHOW_PROTO_DESCRIPTORS, + SHOW_PROTO_DESCRIPTORS_FILE_PATH } /** diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementResultImpl.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementResultImpl.java index 58a1f7ae1c0..ee5032463cd 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementResultImpl.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/StatementResultImpl.java @@ -18,6 +18,7 @@ import static com.google.cloud.spanner.SpannerApiFutures.get; +import com.google.cloud.ByteArray; import com.google.cloud.Timestamp; import com.google.cloud.spanner.ResultSet; import com.google.cloud.spanner.ResultSets; @@ -147,6 +148,23 @@ static StatementResult resultSet( clientSideStatementType); } + /** + * Convenience method for creating a {@link StatementResult} containing a {@link ResultSet} with + * one BYTES column and one row that is created by a {@link ClientSideStatement}. + */ + static StatementResult resultSet( + String name, byte[] values, ClientSideStatementType clientSideStatementType) { + return of( + ResultSets.forRows( + Type.struct(StructField.of(name, Type.bytes())), + Collections.singletonList( + Struct.newBuilder() + .set(name) + .to(values != null ? ByteArray.copyFrom(values) : null) + .build())), + clientSideStatementType); + } + /** {@link StatementResult} containing no results. 
*/ static StatementResult noResult() { return new StatementResultImpl((ClientSideStatementType) null); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/UnitOfWork.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/UnitOfWork.java index 1c8bc6a29c0..ffa93d486e1 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/UnitOfWork.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/connection/UnitOfWork.java @@ -31,6 +31,7 @@ import com.google.cloud.spanner.TransactionContext; import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; import com.google.spanner.v1.ResultSetStats; +import io.opentelemetry.api.trace.Span; import java.util.concurrent.ExecutionException; import javax.annotation.Nonnull; @@ -65,6 +66,24 @@ public boolean isActive() { } } + /** + * Callback for end-of-transaction methods. This is used to commit or rollback connection state + * after an async commit/rollback of a database transaction. + */ + interface EndTransactionCallback { + /** + * This method will be called if the end-of-transaction method (commit or rollback) finished + * successfully, but before the {@link ApiFuture} that is returned by the method is done. + */ + void onSuccess(); + + /** + * This method will be called if the end-of-transaction method (commit or rollback) failed, but + * before the {@link ApiFuture} that is returned by the method is done. + */ + void onFailure(); + } + /** Cancel the currently running statement (if any and the statement may be cancelled). */ void cancel(); @@ -77,15 +96,22 @@ public boolean isActive() { /** @return true if this unit of work is still active. */ boolean isActive(); + /** @return the {@link Span} that is used by this {@link UnitOfWork}. */ + Span getSpan(); + + /** Returns true if this transaction can only be used for a single statement. 
*/ + boolean isSingleUse(); + /** * Commits the changes in this unit of work to the database. For read-only transactions, this only * closes the {@link ReadContext}. This method will throw a {@link SpannerException} if called for * a {@link Type#BATCH}. * * @param callType Indicates whether the top-level call is a sync or async call. + * @param callback Callback that should be called when the commit succeeded or failed. * @return An {@link ApiFuture} that is done when the commit has finished. */ - ApiFuture commitAsync(CallType callType); + ApiFuture commitAsync(@Nonnull CallType callType, @Nonnull EndTransactionCallback callback); /** * Rollbacks any changes in this unit of work. For read-only transactions, this only closes the @@ -93,9 +119,11 @@ public boolean isActive() { * Type#BATCH}. * * @param callType Indicates whether the top-level call is a sync or async call. + * @param callback Callback that should be called when the rollback succeeded or failed. * @return An {@link ApiFuture} that is done when the rollback has finished. 
*/ - ApiFuture rollbackAsync(CallType callType); + ApiFuture rollbackAsync( + @Nonnull CallType callType, @Nonnull EndTransactionCallback callback); /** @see Connection#savepoint(String) */ void savepoint(@Nonnull String name, @Nonnull Dialect dialect); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpc.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpc.java index 53e360b801b..2360b5d5173 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpc.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpc.java @@ -56,6 +56,7 @@ import com.google.api.pathtemplate.PathTemplate; import com.google.cloud.RetryHelper; import com.google.cloud.RetryHelper.RetryHelperException; +import com.google.cloud.grpc.GcpManagedChannel; import com.google.cloud.grpc.GcpManagedChannelBuilder; import com.google.cloud.grpc.GcpManagedChannelOptions; import com.google.cloud.grpc.GcpManagedChannelOptions.GcpMetricsOptions; @@ -200,6 +201,7 @@ import java.util.concurrent.Callable; import java.util.concurrent.CancellationException; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedDeque; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -240,6 +242,7 @@ public class GapicSpannerRpc implements SpannerRpc { private final Set executeQueryRetryableCodes; private final RetrySettings readRetrySettings; private final Set readRetryableCodes; + private final RetrySettings commitRetrySettings; private final SpannerStub partitionedDmlStub; private final RetrySettings partitionedDmlRetrySettings; private final InstanceAdminStubSettings instanceAdminStubSettings; @@ -260,12 +263,18 @@ public class GapicSpannerRpc implements SpannerRpc { private final ScheduledExecutorService spannerWatchdog; + private final 
ConcurrentLinkedDeque responseObservers = + new ConcurrentLinkedDeque<>(); + private final boolean throttleAdministrativeRequests; private final RetrySettings retryAdministrativeRequestsSettings; private static final double ADMINISTRATIVE_REQUESTS_RATE_LIMIT = 1.0D; private static final ConcurrentMap ADMINISTRATIVE_REQUESTS_RATE_LIMITERS = new ConcurrentHashMap<>(); private final boolean leaderAwareRoutingEnabled; + private final boolean endToEndTracingEnabled; + private final int numChannels; + private final boolean isGrpcGcpExtensionEnabled; public static GapicSpannerRpc create(SpannerOptions options) { return new GapicSpannerRpc(options); @@ -317,6 +326,9 @@ public GapicSpannerRpc(final SpannerOptions options) { this.callCredentialsProvider = options.getCallCredentialsProvider(); this.compressorName = options.getCompressorName(); this.leaderAwareRoutingEnabled = options.isLeaderAwareRoutingEnabled(); + this.endToEndTracingEnabled = options.isEndToEndTracingEnabled(); + this.numChannels = options.getNumChannels(); + this.isGrpcGcpExtensionEnabled = options.isGrpcGcpExtensionEnabled(); if (initializeStubs) { // First check if SpannerOptions provides a TransportChannelProvider. Create one @@ -340,6 +352,8 @@ public GapicSpannerRpc(final SpannerOptions options) { MoreObjects.firstNonNull( options.getInterceptorProvider(), SpannerInterceptorProvider.createDefault(options.getOpenTelemetry()))) + // This sets the trace context headers. + .withTraceContext(endToEndTracingEnabled, options.getOpenTelemetry()) // This sets the response compressor (Server -> Client). 
.withEncoding(compressorName)) .setHeaderProvider(headerProviderWithUserAgent) @@ -379,6 +393,8 @@ public GapicSpannerRpc(final SpannerOptions options) { .withCheckInterval(checkInterval) .withClock(NanoClock.getDefaultClock()); + final String emulatorHost = System.getenv("SPANNER_EMULATOR_HOST"); + try { this.spannerStub = GrpcSpannerStub.create( @@ -388,6 +404,9 @@ public GapicSpannerRpc(final SpannerOptions options) { .setTransportChannelProvider(channelProvider) .setCredentialsProvider(credentialsProvider) .setStreamWatchdogProvider(watchdogProvider) + .setTracerFactory( + options.getApiTracerFactory( + /* isAdminClient = */ false, isEmulatorEnabled(options, emulatorHost))) .build()); this.readRetrySettings = options.getSpannerStubSettings().streamingReadSettings().getRetrySettings(); @@ -397,6 +416,8 @@ public GapicSpannerRpc(final SpannerOptions options) { options.getSpannerStubSettings().executeStreamingSqlSettings().getRetrySettings(); this.executeQueryRetryableCodes = options.getSpannerStubSettings().executeStreamingSqlSettings().getRetryableCodes(); + this.commitRetrySettings = + options.getSpannerStubSettings().commitSettings().getRetrySettings(); partitionedDmlRetrySettings = options .getSpannerStubSettings() @@ -413,6 +434,9 @@ public GapicSpannerRpc(final SpannerOptions options) { .setTransportChannelProvider(channelProvider) .setCredentialsProvider(credentialsProvider) .setStreamWatchdogProvider(watchdogProvider) + .setTracerFactory( + options.getApiTracerFactory( + /* isAdminClient = */ false, isEmulatorEnabled(options, emulatorHost))) .executeSqlSettings() .setRetrySettings(partitionedDmlRetrySettings); pdmlSettings.executeStreamingSqlSettings().setRetrySettings(partitionedDmlRetrySettings); @@ -439,6 +463,9 @@ public GapicSpannerRpc(final SpannerOptions options) { .setTransportChannelProvider(channelProvider) .setCredentialsProvider(credentialsProvider) .setStreamWatchdogProvider(watchdogProvider) + .setTracerFactory( + 
options.getApiTracerFactory( + /* isAdminClient = */ true, isEmulatorEnabled(options, emulatorHost))) .build(); this.instanceAdminStub = GrpcInstanceAdminStub.create(instanceAdminStubSettings); @@ -449,6 +476,9 @@ public GapicSpannerRpc(final SpannerOptions options) { .setTransportChannelProvider(channelProvider) .setCredentialsProvider(credentialsProvider) .setStreamWatchdogProvider(watchdogProvider) + .setTracerFactory( + options.getApiTracerFactory( + /* isAdminClient = */ true, isEmulatorEnabled(options, emulatorHost))) .build(); // Automatically retry RESOURCE_EXHAUSTED for GetOperation if auto-throttling of @@ -492,7 +522,7 @@ public UnaryCallable createUnaryCalla // Check whether the SPANNER_EMULATOR_HOST env var has been set, and if so, if the emulator // is actually running. - checkEmulatorConnection(options, channelProvider, credentialsProvider); + checkEmulatorConnection(options, channelProvider, credentialsProvider, emulatorHost); } catch (Exception e) { throw newSpannerException(e); } @@ -504,6 +534,8 @@ public UnaryCallable createUnaryCalla this.readRetryableCodes = null; this.executeQueryRetrySettings = null; this.executeQueryRetryableCodes = null; + this.commitRetrySettings = + SpannerStubSettings.newBuilder().commitSettings().getRetrySettings(); this.partitionedDmlStub = null; this.databaseAdminStubSettings = null; this.instanceAdminStubSettings = null; @@ -589,15 +621,11 @@ private static HeaderProvider headerProviderWithUserAgentFrom(HeaderProvider hea private static void checkEmulatorConnection( SpannerOptions options, TransportChannelProvider channelProvider, - CredentialsProvider credentialsProvider) + CredentialsProvider credentialsProvider, + String emulatorHost) throws IOException { - final String emulatorHost = System.getenv("SPANNER_EMULATOR_HOST"); // Only do the check if the emulator environment variable has been set to localhost. 
- if (options.getChannelProvider() == null - && emulatorHost != null - && options.getHost() != null - && options.getHost().startsWith("http://localhost") - && options.getHost().endsWith(emulatorHost)) { + if (isEmulatorEnabled(options, emulatorHost)) { // Do a quick check to see if the emulator is actually running. try { InstanceAdminStubSettings.Builder testEmulatorSettings = @@ -630,6 +658,15 @@ private static void checkEmulatorConnection( } } + private static boolean isEmulatorEnabled(SpannerOptions options, String emulatorHost) { + // Only do the check if the emulator environment variable has been set to localhost. + return options.getChannelProvider() == null + && emulatorHost != null + && options.getHost() != null + && options.getHost().startsWith("http://localhost") + && options.getHost().endsWith(emulatorHost); + } + private static final RetrySettings ADMIN_REQUESTS_LIMIT_EXCEEDED_RETRY_SETTINGS = RetrySettings.newBuilder() .setInitialRetryDelay(Duration.ofSeconds(5L)) @@ -1797,6 +1834,11 @@ public CommitResponse commit(CommitRequest commitRequest, @Nullable Map rollbackAsync(RollbackRequest request, @Nullable Map options) { GrpcCallContext context = @@ -1946,7 +1988,20 @@ GrpcCallContext newCallContext( boolean routeToLeader) { GrpcCallContext context = GrpcCallContext.createDefault(); if (options != null) { - context = context.withChannelAffinity(Option.CHANNEL_HINT.getLong(options).intValue()); + if (this.isGrpcGcpExtensionEnabled) { + // Set channel affinity in gRPC-GCP. + // Compute bounded channel hint to prevent gRPC-GCP affinity map from getting unbounded. + int boundedChannelHint = Option.CHANNEL_HINT.getLong(options).intValue() % this.numChannels; + context = + context.withCallOptions( + context + .getCallOptions() + .withOption( + GcpManagedChannel.AFFINITY_KEY, String.valueOf(boundedChannelHint))); + } else { + // Set channel affinity in GAX. 
+ context = context.withChannelAffinity(Option.CHANNEL_HINT.getLong(options).intValue()); + } } if (compressorName != null) { // This sets the compressor for Client -> Server. @@ -1956,6 +2011,9 @@ GrpcCallContext newCallContext( if (routeToLeader && leaderAwareRoutingEnabled) { context = context.withExtraHeaders(metadataProvider.newRouteToLeaderHeader()); } + if (endToEndTracingEnabled) { + context = context.withExtraHeaders(metadataProvider.newEndToEndTracingHeader()); + } if (callCredentialsProvider != null) { CallCredentials callCredentials = callCredentialsProvider.getCallCredentials(); if (callCredentials != null) { @@ -1972,9 +2030,29 @@ GrpcCallContext newCallContext( return (GrpcCallContext) context.merge(apiCallContextFromContext); } + void registerResponseObserver(SpannerResponseObserver responseObserver) { + responseObservers.add(responseObserver); + } + + void unregisterResponseObserver(SpannerResponseObserver responseObserver) { + responseObservers.remove(responseObserver); + } + + void closeResponseObservers() { + responseObservers.forEach(SpannerResponseObserver::close); + responseObservers.clear(); + } + + @InternalApi + @VisibleForTesting + public int getNumActiveResponseObservers() { + return responseObservers.size(); + } + @Override public void shutdown() { this.rpcIsClosed = true; + closeResponseObservers(); if (this.spannerStub != null) { this.spannerStub.close(); this.partitionedDmlStub.close(); @@ -1996,6 +2074,7 @@ public void shutdown() { public void shutdownNow() { this.rpcIsClosed = true; + closeResponseObservers(); this.spannerStub.close(); this.partitionedDmlStub.close(); this.instanceAdminStub.close(); @@ -2053,7 +2132,7 @@ public void cancel(@Nullable String message) { * A {@code ResponseObserver} that exposes the {@code StreamController} and delegates callbacks to * the {@link ResultStreamConsumer}. 
*/ - private static class SpannerResponseObserver implements ResponseObserver { + private class SpannerResponseObserver implements ResponseObserver { private StreamController controller; private final ResultStreamConsumer consumer; @@ -2062,13 +2141,21 @@ public SpannerResponseObserver(ResultStreamConsumer consumer) { this.consumer = consumer; } + void close() { + if (this.controller != null) { + this.controller.cancel(); + } + } + @Override public void onStart(StreamController controller) { - // Disable the auto flow control to allow client library // set the number of messages it prefers to request controller.disableAutoInboundFlowControl(); this.controller = controller; + if (this.consumer.cancelQueryWhenClientIsClosed()) { + registerResponseObserver(this); + } } @Override @@ -2078,11 +2165,19 @@ public void onResponse(PartialResultSet response) { @Override public void onError(Throwable t) { + // Unregister the response observer when the query has completed with an error. + if (this.consumer.cancelQueryWhenClientIsClosed()) { + unregisterResponseObserver(this); + } consumer.onError(newSpannerException(t)); } @Override public void onComplete() { + // Unregister the response observer when the query has completed normally. 
+ if (this.consumer.cancelQueryWhenClientIsClosed()) { + unregisterResponseObserver(this); + } consumer.onCompleted(); } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/HeaderInterceptor.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/HeaderInterceptor.java index 7de63dc33ba..dd414bed397 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/HeaderInterceptor.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/HeaderInterceptor.java @@ -15,6 +15,7 @@ */ package com.google.cloud.spanner.spi.v1; +import static com.google.api.gax.grpc.GrpcCallContext.TRACER_KEY; import static com.google.cloud.spanner.spi.v1.SpannerRpcViews.DATABASE_ID; import static com.google.cloud.spanner.spi.v1.SpannerRpcViews.INSTANCE_ID; import static com.google.cloud.spanner.spi.v1.SpannerRpcViews.METHOD; @@ -22,13 +23,21 @@ import static com.google.cloud.spanner.spi.v1.SpannerRpcViews.SPANNER_GFE_HEADER_MISSING_COUNT; import static com.google.cloud.spanner.spi.v1.SpannerRpcViews.SPANNER_GFE_LATENCY; +import com.google.api.gax.tracing.ApiTracer; +import com.google.cloud.spanner.BuiltInMetricsConstant; +import com.google.cloud.spanner.CompositeTracer; +import com.google.cloud.spanner.SpannerExceptionFactory; import com.google.cloud.spanner.SpannerRpcMetrics; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.spanner.admin.database.v1.DatabaseName; import io.grpc.CallOptions; import io.grpc.Channel; import io.grpc.ClientCall; import io.grpc.ClientInterceptor; import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import io.grpc.Grpc; import io.grpc.Metadata; import io.grpc.MethodDescriptor; import io.opencensus.stats.MeasureMap; @@ -40,6 +49,13 @@ import io.opencensus.tags.Tags; import io.opentelemetry.api.common.Attributes; import 
io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.api.trace.Span; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ExecutionException; import java.util.logging.Level; import java.util.logging.Logger; import java.util.regex.Matcher; @@ -50,15 +66,24 @@ * Missing count metrics. */ class HeaderInterceptor implements ClientInterceptor { - + private static final DatabaseName UNDEFINED_DATABASE_NAME = + DatabaseName.of("undefined-project", "undefined-instance", "undefined-database"); private static final Metadata.Key SERVER_TIMING_HEADER_KEY = Metadata.Key.of("server-timing", Metadata.ASCII_STRING_MARSHALLER); - private static final Pattern SERVER_TIMING_HEADER_PATTERN = Pattern.compile(".*dur=(?\\d+)"); + private static final String SERVER_TIMING_HEADER_PREFIX = "gfet4t7; dur="; private static final Metadata.Key GOOGLE_CLOUD_RESOURCE_PREFIX_KEY = Metadata.Key.of("google-cloud-resource-prefix", Metadata.ASCII_STRING_MARSHALLER); private static final Pattern GOOGLE_CLOUD_RESOURCE_PREFIX_PATTERN = Pattern.compile( ".*projects/(?\\p{ASCII}[^/]*)(/instances/(?\\p{ASCII}[^/]*))?(/databases/(?\\p{ASCII}[^/]*))?"); + private final Cache databaseNameCache = + CacheBuilder.newBuilder().maximumSize(100).build(); + private final Cache tagsCache = + CacheBuilder.newBuilder().maximumSize(1000).build(); + private final Cache attributesCache = + CacheBuilder.newBuilder().maximumSize(1000).build(); + private final Cache> builtInAttributesCache = + CacheBuilder.newBuilder().maximumSize(1000).build(); // Get the global singleton Tagger object. 
private static final Tagger TAGGER = Tags.getTagger(); @@ -72,57 +97,64 @@ class HeaderInterceptor implements ClientInterceptor { this.spannerRpcMetrics = spannerRpcMetrics; } - private class SpannerProperties { - String projectId; - String instanceId; - String databaseId; - - SpannerProperties(String projectId, String instanceId, String databaseId) { - this.databaseId = databaseId; - this.instanceId = instanceId; - this.projectId = projectId; - } - } - @Override public ClientCall interceptCall( MethodDescriptor method, CallOptions callOptions, Channel next) { + ApiTracer tracer = callOptions.getOption(TRACER_KEY); + CompositeTracer compositeTracer = + tracer instanceof CompositeTracer ? (CompositeTracer) tracer : null; return new SimpleForwardingClientCall(next.newCall(method, callOptions)) { @Override public void start(Listener responseListener, Metadata headers) { - SpannerProperties spannerProperties = createProjectPropertes(headers); - TagContext tagContext = getTagContext(method.getFullMethodName(), spannerProperties); - Attributes attributes = getMetricAttributes(method.getFullMethodName(), spannerProperties); - super.start( - new SimpleForwardingClientCallListener(responseListener) { - @Override - public void onHeaders(Metadata metadata) { - processHeader(metadata, tagContext, attributes); - super.onHeaders(metadata); - } - }, - headers); + try { + Span span = Span.current(); + DatabaseName databaseName = extractDatabaseName(headers); + String key = databaseName + method.getFullMethodName(); + TagContext tagContext = getTagContext(key, method.getFullMethodName(), databaseName); + Attributes attributes = + getMetricAttributes(key, method.getFullMethodName(), databaseName); + Map builtInMetricsAttributes = + getBuiltInMetricAttributes(key, databaseName); + super.start( + new SimpleForwardingClientCallListener(responseListener) { + @Override + public void onHeaders(Metadata metadata) { + Boolean isDirectPathUsed = + 
isDirectPathUsed(getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR)); + addBuiltInMetricAttributes( + compositeTracer, builtInMetricsAttributes, isDirectPathUsed); + processHeader(metadata, tagContext, attributes, span); + super.onHeaders(metadata); + } + }, + headers); + } catch (ExecutionException executionException) { + // This should never happen, + throw SpannerExceptionFactory.asSpannerException(executionException.getCause()); + } } }; } - private void processHeader(Metadata metadata, TagContext tagContext, Attributes attributes) { + private void processHeader( + Metadata metadata, TagContext tagContext, Attributes attributes, Span span) { MeasureMap measureMap = STATS_RECORDER.newMeasureMap(); - if (metadata.get(SERVER_TIMING_HEADER_KEY) != null) { - String serverTiming = metadata.get(SERVER_TIMING_HEADER_KEY); - Matcher matcher = SERVER_TIMING_HEADER_PATTERN.matcher(serverTiming); - if (matcher.find()) { - try { - long latency = Long.parseLong(matcher.group("dur")); - measureMap.put(SPANNER_GFE_LATENCY, latency); - measureMap.put(SPANNER_GFE_HEADER_MISSING_COUNT, 0L); - measureMap.record(tagContext); - - spannerRpcMetrics.recordGfeLatency(latency, attributes); - spannerRpcMetrics.recordGfeHeaderMissingCount(0L, attributes); - } catch (NumberFormatException e) { - LOGGER.log(LEVEL, "Invalid server-timing object in header", matcher.group("dur")); + String serverTiming = metadata.get(SERVER_TIMING_HEADER_KEY); + if (serverTiming != null && serverTiming.startsWith(SERVER_TIMING_HEADER_PREFIX)) { + try { + long latency = Long.parseLong(serverTiming.substring(SERVER_TIMING_HEADER_PREFIX.length())); + measureMap.put(SPANNER_GFE_LATENCY, latency); + measureMap.put(SPANNER_GFE_HEADER_MISSING_COUNT, 0L); + measureMap.record(tagContext); + + spannerRpcMetrics.recordGfeLatency(latency, attributes); + spannerRpcMetrics.recordGfeHeaderMissingCount(0L, attributes); + + if (span != null) { + span.setAttribute("gfe_latency", String.valueOf(latency)); } + } catch 
(NumberFormatException e) { + LOGGER.log(LEVEL, "Invalid server-timing object in header: {}", serverTiming); } } else { spannerRpcMetrics.recordGfeHeaderMissingCount(1L, attributes); @@ -130,45 +162,97 @@ private void processHeader(Metadata metadata, TagContext tagContext, Attributes } } - private SpannerProperties createProjectPropertes(Metadata headers) { - String projectId = "undefined-project"; - String instanceId = "undefined-database"; - String databaseId = "undefined-database"; - if (headers.get(GOOGLE_CLOUD_RESOURCE_PREFIX_KEY) != null) { - String googleResourcePrefix = headers.get(GOOGLE_CLOUD_RESOURCE_PREFIX_KEY); - Matcher matcher = GOOGLE_CLOUD_RESOURCE_PREFIX_PATTERN.matcher(googleResourcePrefix); - if (matcher.find()) { - projectId = matcher.group("project"); - if (matcher.group("instance") != null) { - instanceId = matcher.group("instance"); - } - if (matcher.group("database") != null) { - databaseId = matcher.group("database"); - } - } else { - LOGGER.log(LEVEL, "Error parsing google cloud resource header: " + googleResourcePrefix); - } + private DatabaseName extractDatabaseName(Metadata headers) throws ExecutionException { + String googleResourcePrefix = headers.get(GOOGLE_CLOUD_RESOURCE_PREFIX_KEY); + if (googleResourcePrefix != null) { + return databaseNameCache.get( + googleResourcePrefix, + () -> { + String projectId = "undefined-project"; + String instanceId = "undefined-database"; + String databaseId = "undefined-database"; + Matcher matcher = GOOGLE_CLOUD_RESOURCE_PREFIX_PATTERN.matcher(googleResourcePrefix); + if (matcher.find()) { + projectId = matcher.group("project"); + if (matcher.group("instance") != null) { + instanceId = matcher.group("instance"); + } + if (matcher.group("database") != null) { + databaseId = matcher.group("database"); + } + } else { + LOGGER.log( + LEVEL, "Error parsing google cloud resource header: " + googleResourcePrefix); + } + return DatabaseName.of(projectId, instanceId, databaseId); + }); } - return new 
SpannerProperties(projectId, instanceId, databaseId); + return UNDEFINED_DATABASE_NAME; + } + + private TagContext getTagContext(String key, String method, DatabaseName databaseName) + throws ExecutionException { + return tagsCache.get( + key, + () -> + TAGGER + .currentBuilder() + .putLocal(PROJECT_ID, TagValue.create(databaseName.getProject())) + .putLocal(INSTANCE_ID, TagValue.create(databaseName.getInstance())) + .putLocal(DATABASE_ID, TagValue.create(databaseName.getDatabase())) + .putLocal(METHOD, TagValue.create(method)) + .build()); + } + + private Attributes getMetricAttributes(String key, String method, DatabaseName databaseName) + throws ExecutionException { + return attributesCache.get( + key, + () -> { + AttributesBuilder attributesBuilder = Attributes.builder(); + attributesBuilder.put("database", databaseName.getDatabase()); + attributesBuilder.put("instance_id", databaseName.getInstance()); + attributesBuilder.put("project_id", databaseName.getProject()); + attributesBuilder.put("method", method); + + return attributesBuilder.build(); + }); } - private TagContext getTagContext(String method, SpannerProperties spannerProperties) { - return TAGGER - .currentBuilder() - .putLocal(PROJECT_ID, TagValue.create(spannerProperties.projectId)) - .putLocal(INSTANCE_ID, TagValue.create(spannerProperties.instanceId)) - .putLocal(DATABASE_ID, TagValue.create(spannerProperties.databaseId)) - .putLocal(METHOD, TagValue.create(method)) - .build(); + private Map getBuiltInMetricAttributes(String key, DatabaseName databaseName) + throws ExecutionException { + return builtInAttributesCache.get( + key, + () -> { + Map attributes = new HashMap<>(); + attributes.put(BuiltInMetricsConstant.DATABASE_KEY.getKey(), databaseName.getDatabase()); + attributes.put( + BuiltInMetricsConstant.INSTANCE_ID_KEY.getKey(), databaseName.getInstance()); + return attributes; + }); } - private Attributes getMetricAttributes(String method, SpannerProperties spannerProperties) { - 
AttributesBuilder attributesBuilder = Attributes.builder(); - attributesBuilder.put("database", spannerProperties.databaseId); - attributesBuilder.put("instance_id", spannerProperties.instanceId); - attributesBuilder.put("project_id", spannerProperties.projectId); - attributesBuilder.put("method", method); + private void addBuiltInMetricAttributes( + CompositeTracer compositeTracer, + Map builtInMetricsAttributes, + Boolean isDirectPathUsed) { + if (compositeTracer != null) { + // Direct Path used attribute + Map attributes = new HashMap<>(builtInMetricsAttributes); + attributes.put( + BuiltInMetricsConstant.DIRECT_PATH_USED_KEY.getKey(), Boolean.toString(isDirectPathUsed)); + + compositeTracer.addAttributes(attributes); + } + } - return attributesBuilder.build(); + private Boolean isDirectPathUsed(SocketAddress remoteAddr) { + if (remoteAddr instanceof InetSocketAddress) { + InetAddress inetAddress = ((InetSocketAddress) remoteAddr).getAddress(); + String addr = inetAddress.getHostAddress(); + return addr.startsWith(BuiltInMetricsConstant.DP_IPV4_PREFIX) + || addr.startsWith(BuiltInMetricsConstant.DP_IPV6_PREFIX); + } + return false; } } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerInterceptorProvider.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerInterceptorProvider.java index 9b1a2fd3c1f..b4d28ef0789 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerInterceptorProvider.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerInterceptorProvider.java @@ -74,6 +74,14 @@ SpannerInterceptorProvider withEncoding(String encoding) { return this; } + SpannerInterceptorProvider withTraceContext( + boolean endToEndTracingEnabled, OpenTelemetry openTelemetry) { + if (endToEndTracingEnabled) { + return with(new TraceContextInterceptor(openTelemetry)); + } + return this; + } + @Override public List getInterceptors() { return 
clientInterceptors; diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerMetadataProvider.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerMetadataProvider.java index 77406a5399b..2ebc4925788 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerMetadataProvider.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerMetadataProvider.java @@ -15,20 +15,29 @@ */ package com.google.cloud.spanner.spi.v1; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.base.MoreObjects; +import com.google.common.base.Strings; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; import com.google.common.collect.ImmutableMap; import io.grpc.Metadata; import io.grpc.Metadata.Key; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutionException; import java.util.regex.Matcher; import java.util.regex.Pattern; /** For internal use only. 
*/ class SpannerMetadataProvider { + private final Cache>> extraHeadersCache = + CacheBuilder.newBuilder().maximumSize(100).build(); private final Map, String> headers; private final String resourceHeaderKey; private static final String ROUTE_TO_LEADER_HEADER_KEY = "x-goog-spanner-route-to-leader"; + private static final String END_TO_END_TRACING_HEADER_KEY = "x-goog-spanner-end-to-end-tracing"; private static final Pattern[] RESOURCE_TOKEN_PATTERNS = { Pattern.compile("^(?projects/[^/]*/instances/[^/]*/databases/[^/]*)(.*)?"), Pattern.compile("^(?projects/[^/]*/instances/[^/]*)(.*)?") @@ -36,6 +45,8 @@ class SpannerMetadataProvider { private static final Map> ROUTE_TO_LEADER_HEADER_MAP = ImmutableMap.of(ROUTE_TO_LEADER_HEADER_KEY, Collections.singletonList("true")); + private static final Map> END_TO_END_TRACING_HEADER_MAP = + ImmutableMap.of(END_TO_END_TRACING_HEADER_KEY, Collections.singletonList("true")); private SpannerMetadataProvider(Map headers, String resourceHeaderKey) { this.resourceHeaderKey = resourceHeaderKey; @@ -61,18 +72,30 @@ Metadata newMetadata(String resourceTokenTemplate, String defaultResourceToken) Map> newExtraHeaders( String resourceTokenTemplate, String defaultResourceToken) { - return ImmutableMap.>builder() - .put( - resourceHeaderKey, - Collections.singletonList( - getResourceHeaderValue(resourceTokenTemplate, defaultResourceToken))) - .build(); + try { + return extraHeadersCache.get( + MoreObjects.firstNonNull(resourceTokenTemplate, ""), + () -> + ImmutableMap.>builder() + .put( + resourceHeaderKey, + Collections.singletonList( + getResourceHeaderValue(resourceTokenTemplate, defaultResourceToken))) + .build()); + } catch (ExecutionException executionException) { + // This should never happen. 
+ throw SpannerExceptionFactory.asSpannerException(executionException.getCause()); + } } Map> newRouteToLeaderHeader() { return ROUTE_TO_LEADER_HEADER_MAP; } + Map> newEndToEndTracingHeader() { + return END_TO_END_TRACING_HEADER_MAP; + } + private Map, String> constructHeadersAsMetadata( Map headers) { ImmutableMap.Builder, String> headersAsMetadataBuilder = @@ -86,7 +109,7 @@ private Map, String> constructHeadersAsMetadata( private String getResourceHeaderValue(String resourceTokenTemplate, String defaultResourceToken) { String resourceToken = defaultResourceToken; - if (resourceTokenTemplate != null) { + if (!Strings.isNullOrEmpty(resourceTokenTemplate)) { for (Pattern pattern : RESOURCE_TOKEN_PATTERNS) { Matcher m = pattern.matcher(resourceTokenTemplate); if (m.matches()) { diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerRpc.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerRpc.java index f063a7a3138..0b040df4197 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerRpc.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/SpannerRpc.java @@ -94,7 +94,8 @@ T get(@Nullable Map options) { return (T) options.get(this); } - Long getLong(@Nullable Map options) { + @InternalApi + public Long getLong(@Nullable Map options) { return get(options); } @@ -152,6 +153,15 @@ interface ResultStreamConsumer { void onCompleted(); void onError(SpannerException e); + + /** + * Returns true if the stream should be cancelled when the Spanner client is closed. This + * returns true for {@link com.google.cloud.spanner.BatchReadOnlyTransaction}, as these use a + * non-pooled session. Pooled sessions are deleted when the Spanner client is closed, and this + * automatically also cancels any query that uses the session, which means that we don't need to + * explicitly cancel those queries when the Spanner client is closed. 
+ */ + boolean cancelQueryWhenClientIsClosed(); } /** Handle for cancellation of a streaming read or query call. */ @@ -469,6 +479,10 @@ CommitResponse commit(CommitRequest commitRequest, @Nullable Map opti ApiFuture commitAsync( CommitRequest commitRequest, @Nullable Map options); + default RetrySettings getCommitRetrySettings() { + return SpannerStubSettings.newBuilder().commitSettings().getRetrySettings(); + } + void rollback(RollbackRequest request, @Nullable Map options) throws SpannerException; ApiFuture rollbackAsync(RollbackRequest request, @Nullable Map options); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/TraceContextInterceptor.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/TraceContextInterceptor.java new file mode 100644 index 00000000000..3b46ba4f880 --- /dev/null +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/spi/v1/TraceContextInterceptor.java @@ -0,0 +1,73 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.spanner.spi.v1; + +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.context.Context; +import io.opentelemetry.context.propagation.TextMapPropagator; +import io.opentelemetry.context.propagation.TextMapSetter; + +/** + * Intercepts all gRPC calls and injects trace context related headers to propagate trace context to + * Spanner. This class takes reference from OpenTelemetry's JAVA instrumentation library for gRPC. + * https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/9ecf7965aa455d41ea8cc0761b6c6b6eeb106324/instrumentation/grpc-1.6/library/src/main/java/io/opentelemetry/instrumentation/grpc/v1_6/TracingClientInterceptor.java#L27 + */ +class TraceContextInterceptor implements ClientInterceptor { + + private final TextMapPropagator textMapPropagator; + + TraceContextInterceptor(OpenTelemetry openTelemetry) { + this.textMapPropagator = openTelemetry.getPropagators().getTextMapPropagator(); + } + + enum MetadataSetter implements TextMapSetter { + INSTANCE; + + @SuppressWarnings("null") + @Override + public void set(Metadata carrier, String key, String value) { + carrier.put(Metadata.Key.of(key, Metadata.ASCII_STRING_MARSHALLER), value); + } + } + + private static final class NoopSimpleForwardingClientCallListener + extends SimpleForwardingClientCallListener { + public NoopSimpleForwardingClientCallListener(ClientCall.Listener responseListener) { + super(responseListener); + } + } + + @Override + public ClientCall interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + return new SimpleForwardingClientCall(next.newCall(method, callOptions)) { + 
@Override + public void start(Listener responseListener, Metadata headers) { + Context parentContext = Context.current(); + textMapPropagator.inject(parentContext, headers, MetadataSetter.INSTANCE); + super.start(new NoopSimpleForwardingClientCallListener(responseListener), headers); + } + }; + } +} diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/SpannerClient.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/SpannerClient.java index 64ffdb0902a..8625c17785a 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/SpannerClient.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/SpannerClient.java @@ -1629,6 +1629,7 @@ public final Transaction beginTransaction(String session, TransactionOptions opt * SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) * .setOptions(TransactionOptions.newBuilder().build()) * .setRequestOptions(RequestOptions.newBuilder().build()) + * .setMutationKey(Mutation.newBuilder().build()) * .build(); * Transaction response = spannerClient.beginTransaction(request); * } @@ -1662,6 +1663,7 @@ public final Transaction beginTransaction(BeginTransactionRequest request) { * SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) * .setOptions(TransactionOptions.newBuilder().build()) * .setRequestOptions(RequestOptions.newBuilder().build()) + * .setMutationKey(Mutation.newBuilder().build()) * .build(); * ApiFuture future = spannerClient.beginTransactionCallable().futureCall(request); * // Do something. 
@@ -1911,6 +1913,7 @@ public final CommitResponse commit( * .setReturnCommitStats(true) * .setMaxCommitDelay(Duration.newBuilder().build()) * .setRequestOptions(RequestOptions.newBuilder().build()) + * .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) * .build(); * CommitResponse response = spannerClient.commit(request); * } @@ -1955,6 +1958,7 @@ public final CommitResponse commit(CommitRequest request) { * .setReturnCommitStats(true) * .setMaxCommitDelay(Duration.newBuilder().build()) * .setRequestOptions(RequestOptions.newBuilder().build()) + * .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) * .build(); * ApiFuture future = spannerClient.commitCallable().futureCall(request); * // Do something. diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/SpannerSettings.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/SpannerSettings.java index a785ff6d993..721e874e01e 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/SpannerSettings.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/SpannerSettings.java @@ -76,7 +76,9 @@ *

    The builder of this class is recursive, so contained classes are themselves builders. When * build() is called, the tree of builders is called to create the complete settings object. * - *

    For example, to set the total timeout of createSession to 30 seconds: + *

    For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of createSession: * *

    {@code
      * // This snippet has been automatically generated and should be regarded as a code template only.
    @@ -92,10 +94,21 @@
      *             .createSessionSettings()
      *             .getRetrySettings()
      *             .toBuilder()
    - *             .setTotalTimeout(Duration.ofSeconds(30))
    + *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
    + *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
    + *             .setMaxAttempts(5)
    + *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
    + *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
    + *             .setRetryDelayMultiplier(1.3)
    + *             .setRpcTimeoutMultiplier(1.5)
    + *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
      *             .build());
      * SpannerSettings spannerSettings = spannerSettingsBuilder.build();
      * }
    + * + * Please refer to the [Client Side Retry + * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for + * additional support in setting retries. */ @Generated("by gapic-generator-java") public class SpannerSettings extends ClientSettings { diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/SpannerStubSettings.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/SpannerStubSettings.java index 4a60eb3ef9b..ae4b1c11daf 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/SpannerStubSettings.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/v1/stub/SpannerStubSettings.java @@ -21,6 +21,7 @@ import com.google.api.core.ApiFunction; import com.google.api.core.ApiFuture; import com.google.api.core.BetaApi; +import com.google.api.core.ObsoleteApi; import com.google.api.gax.core.GaxProperties; import com.google.api.gax.core.GoogleCredentialsProvider; import com.google.api.gax.core.InstantiatingExecutorProvider; @@ -93,7 +94,9 @@ *

    The builder of this class is recursive, so contained classes are themselves builders. When * build() is called, the tree of builders is called to create the complete settings object. * - *

    For example, to set the total timeout of createSession to 30 seconds: + *

    For example, to set the + * [RetrySettings](https://cloud.google.com/java/docs/reference/gax/latest/com.google.api.gax.retrying.RetrySettings) + * of createSession: * *

    {@code
      * // This snippet has been automatically generated and should be regarded as a code template only.
    @@ -109,10 +112,21 @@
      *             .createSessionSettings()
      *             .getRetrySettings()
      *             .toBuilder()
    - *             .setTotalTimeout(Duration.ofSeconds(30))
    + *             .setInitialRetryDelayDuration(Duration.ofSeconds(1))
    + *             .setInitialRpcTimeoutDuration(Duration.ofSeconds(5))
    + *             .setMaxAttempts(5)
    + *             .setMaxRetryDelayDuration(Duration.ofSeconds(30))
    + *             .setMaxRpcTimeoutDuration(Duration.ofSeconds(60))
    + *             .setRetryDelayMultiplier(1.3)
    + *             .setRpcTimeoutMultiplier(1.5)
    + *             .setTotalTimeoutDuration(Duration.ofSeconds(300))
      *             .build());
      * SpannerStubSettings spannerSettings = spannerSettingsBuilder.build();
      * }
    + * + * Please refer to the [Client Side Retry + * Guide](https://github.com/googleapis/google-cloud-java/blob/main/docs/client_retries.md) for + * additional support in setting retries. */ @Generated("by gapic-generator-java") public class SpannerStubSettings extends StubSettings { @@ -176,9 +190,7 @@ public String extractNextToken(ListSessionsResponse payload) { @Override public Iterable extractResources(ListSessionsResponse payload) { - return payload.getSessionsList() == null - ? ImmutableList.of() - : payload.getSessionsList(); + return payload.getSessionsList(); } }; @@ -311,6 +323,7 @@ public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuild } /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") public static String getDefaultEndpoint() { return "spanner.googleapis.com:443"; } @@ -446,13 +459,19 @@ public static class Builder extends StubSettings.BuildernewArrayList(StatusCode.Code.UNAVAILABLE))); + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.UNAVAILABLE, StatusCode.Code.RESOURCE_EXHAUSTED))); definitions.put( "retry_policy_2_codes", - ImmutableSet.copyOf(Lists.newArrayList(StatusCode.Code.UNAVAILABLE))); + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.UNAVAILABLE, StatusCode.Code.RESOURCE_EXHAUSTED))); definitions.put( "retry_policy_1_codes", - ImmutableSet.copyOf(Lists.newArrayList(StatusCode.Code.UNAVAILABLE))); + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.UNAVAILABLE, StatusCode.Code.RESOURCE_EXHAUSTED))); definitions.put( "no_retry_0_codes", ImmutableSet.copyOf(Lists.newArrayList())); RETRYABLE_CODE_DEFINITIONS = definitions.build(); diff --git a/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.database.v1/reflect-config.json b/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.database.v1/reflect-config.json index 9518d8b2191..c7f99fa6ead 100644 --- 
a/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.database.v1/reflect-config.json +++ b/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.database.v1/reflect-config.json @@ -305,6 +305,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.api.ResourceDescriptor", "queryAllDeclaredConstructors": true, @@ -1646,6 +1664,42 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.BackupSchedule", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupSchedule$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupScheduleSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.BackupScheduleSpec$Builder", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig", "queryAllDeclaredConstructors": true, @@ -1772,6 +1826,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CreateBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.CreateDatabaseMetadata", "queryAllDeclaredConstructors": true, @@ -1808,6 +1880,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.CrontabSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.CrontabSpec$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.Database", "queryAllDeclaredConstructors": true, @@ -1898,6 +1988,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": 
true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.DropDatabaseRequest", "queryAllDeclaredConstructors": true, @@ -1961,6 +2069,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.FullBackupSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.FullBackupSpec$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.GetBackupRequest", "queryAllDeclaredConstructors": true, @@ -1979,6 +2105,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.GetBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.GetBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.GetDatabaseDdlRequest", 
"queryAllDeclaredConstructors": true, @@ -2033,6 +2177,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.IncrementalBackupSpec", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.IncrementalBackupSpec$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.ListBackupOperationsRequest", "queryAllDeclaredConstructors": true, @@ -2069,6 +2231,42 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.ListBackupSchedulesResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, 
+ "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.ListBackupsRequest", "queryAllDeclaredConstructors": true, @@ -2357,6 +2555,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata", "queryAllDeclaredConstructors": true, diff --git a/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.instance.v1/reflect-config.json b/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.instance.v1/reflect-config.json index 92d5f0c9a0b..cfe0908272e 100644 --- a/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.instance.v1/reflect-config.json +++ b/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.instance.v1/reflect-config.json @@ -305,6 +305,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + 
"queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.api.ResourceDescriptor", "queryAllDeclaredConstructors": true, @@ -1610,6 +1628,42 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption$AutoscalingConfigOverrides", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption$AutoscalingConfigOverrides$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AsymmetricAutoscalingOption$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.instance.v1.AutoscalingConfig$AutoscalingLimits", "queryAllDeclaredConstructors": true, @@ -1898,6 +1952,15 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.instance.v1.Instance$Edition", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + 
"allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.instance.v1.Instance$State", "queryAllDeclaredConstructors": true, @@ -2150,6 +2213,60 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceMetadata", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceMetadata$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.MoveInstanceResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.instance.v1.OperationProgress", 
"queryAllDeclaredConstructors": true, @@ -2168,6 +2285,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaComputeCapacity", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaComputeCapacity$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.instance.v1.ReplicaInfo", "queryAllDeclaredConstructors": true, @@ -2195,6 +2330,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaSelection", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.instance.v1.ReplicaSelection$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata", "queryAllDeclaredConstructors": true, diff --git a/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.v1/reflect-config.json b/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.v1/reflect-config.json index 04776be1ded..ffa070f1225 100644 --- a/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.v1/reflect-config.json +++ 
b/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.v1/reflect-config.json @@ -305,6 +305,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.api.PythonSettings$ExperimentalFeatures$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.api.ResourceDescriptor", "queryAllDeclaredConstructors": true, @@ -1727,6 +1745,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.v1.MultiplexedSessionPrecommitToken", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.MultiplexedSessionPrecommitToken$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.v1.Mutation", "queryAllDeclaredConstructors": true, @@ -1988,6 +2024,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.v1.ReadRequest$LockHint", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.v1.ReadRequest$OrderBy", + "queryAllDeclaredConstructors": true, + 
"queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.v1.RequestOptions", "queryAllDeclaredConstructors": true, diff --git a/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/connection/ClientSideStatements.json b/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/connection/ClientSideStatements.json index da2f79b844e..e4f367d81eb 100644 --- a/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/connection/ClientSideStatements.json +++ b/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/connection/ClientSideStatements.json @@ -149,6 +149,15 @@ "method": "statementShowTransactionTag", "exampleStatements": ["show variable transaction_tag"] }, + { + "name": "SHOW VARIABLE EXCLUDE_TXN_FROM_CHANGE_STREAMS", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_EXCLUDE_TXN_FROM_CHANGE_STREAMS", + "regex": "(?is)\\A\\s*show\\s+variable\\s+exclude_txn_from_change_streams\\s*\\z", + "method": "statementShowExcludeTxnFromChangeStreams", + "exampleStatements": ["show variable exclude_txn_from_change_streams"] + }, { "name": "SHOW VARIABLE RPC_PRIORITY", "executorName": "ClientSideStatementNoParamExecutor", @@ -176,6 +185,15 @@ "method": "statementShowDelayTransactionStartUntilFirstWrite", "exampleStatements": ["show variable delay_transaction_start_until_first_write"] }, + { + "name": "SHOW VARIABLE KEEP_TRANSACTION_ALIVE", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_KEEP_TRANSACTION_ALIVE", + "regex": "(?is)\\A\\s*show\\s+variable\\s+keep_transaction_alive\\s*\\z", + "method": "statementShowKeepTransactionAlive", + "exampleStatements": ["show variable keep_transaction_alive"] + }, { "name": "PARTITION ", "executorName": "ClientSideStatementPartitionExecutor", @@ 
-270,6 +288,15 @@ "exampleStatements": ["abort batch"], "examplePrerequisiteStatements": ["start batch ddl"] }, + { + "name": "RESET ALL", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": "RESET_ALL", + "regex": "(?is)\\A\\s*(?:reset)(?:\\s+all)\\s*\\z", + "method": "statementResetAll", + "exampleStatements": ["reset all"] + }, { "name": "SET AUTOCOMMIT = TRUE|FALSE", "executorName": "ClientSideStatementSetExecutor", @@ -338,11 +365,21 @@ "statementType": "SET_STATEMENT_TIMEOUT", "regex": "(?is)\\A\\s*set\\s+statement_timeout\\s*(?:=)\\s*(.*)\\z", "method": "statementSetStatementTimeout", - "exampleStatements": ["set statement_timeout=null", "set statement_timeout='1s'", "set statement_timeout='100ms'", "set statement_timeout='10000us'", "set statement_timeout='9223372036854775807ns'"], + "exampleStatements": [ + "set statement_timeout=null", + "set statement_timeout = null ", + "set statement_timeout='1s'", + "set statement_timeout = '1s' ", + "set statement_timeout=100", + "set statement_timeout = 100 ", + "set statement_timeout='100ms'", + "set statement_timeout='10000us'", + "set statement_timeout='9223372036854775807ns'" + ], "setStatement": { "propertyName": "STATEMENT_TIMEOUT", "separator": "=", - "allowedValues": "('(\\d{1,19})(s|ms|us|ns)'|NULL)", + "allowedValues": "('(\\d{1,19})(s|ms|us|ns)'|\\d{1,19}|NULL)", "converterName": "ClientSideStatementValueConverters$DurationConverter" } }, @@ -458,11 +495,23 @@ "statementType": "SET_MAX_COMMIT_DELAY", "regex": "(?is)\\A\\s*set\\s+max_commit_delay\\s*(?:=)\\s*(.*)\\z", "method": "statementSetMaxCommitDelay", - "exampleStatements": ["set max_commit_delay=null", "set max_commit_delay='1s'", "set max_commit_delay='100ms'", "set max_commit_delay='10000us'", "set max_commit_delay='9223372036854775807ns'"], + "exampleStatements": [ + "set max_commit_delay=null", + "set max_commit_delay = null", + "set max_commit_delay = null ", + "set max_commit_delay=1000", + 
"set max_commit_delay = 1000", + "set max_commit_delay = 1000 ", + "set max_commit_delay='1s'", + "set max_commit_delay = '1s'", + "set max_commit_delay = '1s' ", + "set max_commit_delay='100ms'", + "set max_commit_delay='10000us'", + "set max_commit_delay='9223372036854775807ns'"], "setStatement": { "propertyName": "MAX_COMMIT_DELAY", "separator": "=", - "allowedValues": "('(\\d{1,19})(s|ms|us|ns)'|NULL)", + "allowedValues": "('(\\d{1,19})(s|ms|us|ns)'|\\d{1,19}|NULL)", "converterName": "ClientSideStatementValueConverters$DurationConverter" } }, @@ -497,6 +546,21 @@ "converterName": "ClientSideStatementValueConverters$StringValueConverter" } }, + { + "name": "SET EXCLUDE_TXN_FROM_CHANGE_STREAMS = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_EXCLUDE_TXN_FROM_CHANGE_STREAMS", + "regex": "(?is)\\A\\s*set\\s+exclude_txn_from_change_streams\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetExcludeTxnFromChangeStreams", + "exampleStatements": ["set exclude_txn_from_change_streams = true", "set exclude_txn_from_change_streams = false"], + "setStatement": { + "propertyName": "EXCLUDE_TXN_FROM_CHANGE_STREAMS", + "separator": "=", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, { "name": "SET RPC_PRIORITY = 'HIGH'|'MEDIUM'|'LOW'|'NULL'", "executorName": "ClientSideStatementSetExecutor", @@ -550,6 +614,21 @@ "converterName": "ClientSideStatementValueConverters$BooleanConverter" } }, + { + "name": "SET KEEP_TRANSACTION_ALIVE = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_KEEP_TRANSACTION_ALIVE", + "regex": "(?is)\\A\\s*set\\s+keep_transaction_alive\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetKeepTransactionAlive", + "exampleStatements": ["set keep_transaction_alive = true", "set keep_transaction_alive = false"], + "setStatement": { + "propertyName": 
"KEEP_TRANSACTION_ALIVE", + "separator": "=", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, { "name": "SHOW VARIABLE DATA_BOOST_ENABLED", "executorName": "ClientSideStatementNoParamExecutor", @@ -645,6 +724,56 @@ "allowedValues": "(\\d{1,9})", "converterName": "ClientSideStatementValueConverters$NonNegativeIntegerConverter" } + }, + { + "name": "SET PROTO_DESCRIPTORS = ''", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_PROTO_DESCRIPTORS", + "regex": "(?is)\\A\\s*set\\s+proto_descriptors\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetProtoDescriptors", + "exampleStatements": ["set proto_descriptors='protodescriptorsbase64'"], + "setStatement": { + "propertyName": "PROTO_DESCRIPTORS", + "separator": "=", + "allowedValues": "'((\\S+)|())'", + "converterName": "ClientSideStatementValueConverters$ProtoDescriptorsConverter" + } + }, + { + "name": "SET PROTO_DESCRIPTORS_FILE_PATH = ''", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_PROTO_DESCRIPTORS_FILE_PATH", + "regex": "(?is)\\A\\s*set\\s+proto_descriptors_file_path\\s*(?:=)\\s*(.*)\\z", + "method": "statementSetProtoDescriptorsFilePath", + "exampleStatements": ["set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'"], + "setStatement": { + "propertyName": "PROTO_DESCRIPTORS_FILE_PATH", + "separator": "=", + "allowedValues": "'((\\S+)|())'", + "converterName": "ClientSideStatementValueConverters$ProtoDescriptorsFileConverter" + } + }, + { + "name": "SHOW VARIABLE PROTO_DESCRIPTORS", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_PROTO_DESCRIPTORS", + "regex": "(?is)\\A\\s*show\\s+variable\\s+proto_descriptors\\s*\\z", + "method": "statementShowProtoDescriptors", + "exampleStatements": ["show variable proto_descriptors"] + }, + { + 
"name": "SHOW VARIABLE PROTO_DESCRIPTORS_FILE_PATH", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_PROTO_DESCRIPTORS_FILE_PATH", + "regex": "(?is)\\A\\s*show\\s+variable\\s+proto_descriptors_file_path\\s*\\z", + "method": "statementShowProtoDescriptorsFilePath", + "exampleStatements": [ + "show variable proto_descriptors_file_path" + ] } ] } diff --git a/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/connection/PG_ClientSideStatements.json b/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/connection/PG_ClientSideStatements.json index f641f7be0d9..a0960954a6a 100644 --- a/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/connection/PG_ClientSideStatements.json +++ b/google-cloud-spanner/src/main/resources/com/google/cloud/spanner/connection/PG_ClientSideStatements.json @@ -149,6 +149,15 @@ "method": "statementShowTransactionTag", "exampleStatements": ["show spanner.transaction_tag","show variable spanner.transaction_tag"] }, + { + "name": "SHOW [VARIABLE] SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_EXCLUDE_TXN_FROM_CHANGE_STREAMS", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.exclude_txn_from_change_streams\\s*\\z", + "method": "statementShowExcludeTxnFromChangeStreams", + "exampleStatements": ["show spanner.exclude_txn_from_change_streams","show variable spanner.exclude_txn_from_change_streams"] + }, { "name": "SHOW [VARIABLE] SPANNER.RPC_PRIORITY", "executorName": "ClientSideStatementNoParamExecutor", @@ -176,6 +185,15 @@ "method": "statementShowDelayTransactionStartUntilFirstWrite", "exampleStatements": ["show spanner.delay_transaction_start_until_first_write","show variable spanner.delay_transaction_start_until_first_write"] }, + { + "name": "SHOW [VARIABLE] SPANNER.KEEP_TRANSACTION_ALIVE", + "executorName": 
"ClientSideStatementNoParamExecutor", + "resultType": "RESULT_SET", + "statementType": "SHOW_KEEP_TRANSACTION_ALIVE", + "regex": "(?is)\\A\\s*show\\s+(?:variable\\s+)?spanner\\.keep_transaction_alive\\s*\\z", + "method": "statementShowKeepTransactionAlive", + "exampleStatements": ["show spanner.keep_transaction_alive","show variable spanner.keep_transaction_alive"] + }, { "name": "SHOW [VARIABLE] TRANSACTION ISOLATION LEVEL", "executorName": "ClientSideStatementNoParamExecutor", @@ -319,6 +337,15 @@ "exampleStatements": ["abort batch"], "examplePrerequisiteStatements": ["start batch ddl"] }, + { + "name": "RESET ALL", + "executorName": "ClientSideStatementNoParamExecutor", + "resultType": "NO_RESULT", + "statementType": "RESET_ALL", + "regex": "(?is)\\A\\s*(?:reset)(?:\\s+all)\\s*\\z", + "method": "statementResetAll", + "exampleStatements": ["reset all"] + }, { "name": "SET AUTOCOMMIT =|TO TRUE|FALSE", "executorName": "ClientSideStatementSetExecutor", @@ -394,9 +421,13 @@ "method": "statementSetStatementTimeout", "exampleStatements": [ "set statement_timeout=default", + "set statement_timeout = default ", + "set statement_timeout = DEFAULT ", "set statement_timeout='1s'", + "set statement_timeout = '1s' ", "set statement_timeout='100ms'", "set statement_timeout=100", + "set statement_timeout = 100 ", "set statement_timeout='10000us'", "set statement_timeout='9223372036854775807ns'", "set statement_timeout to default", @@ -409,7 +440,7 @@ "setStatement": { "propertyName": "STATEMENT_TIMEOUT", "separator": "(?:=|\\s+TO\\s+)", - "allowedValues": "(\\d{1,19}|'(\\d{1,19})(s|ms|us|ns)'|DEFAULT)", + "allowedValues": "('(\\d{1,19})(s|ms|us|ns)'|\\d{1,19}|DEFAULT)", "converterName": "ClientSideStatementValueConverters$PgDurationConverter" } }, @@ -624,11 +655,23 @@ "statementType": "SET_MAX_COMMIT_DELAY", "regex": "(?is)\\A\\s*set\\s+spanner\\.max_commit_delay(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", "method": "statementSetMaxCommitDelay", - "exampleStatements": ["set 
spanner.max_commit_delay=null", "set spanner.max_commit_delay='1s'", "set spanner.max_commit_delay='100ms'", "set spanner.max_commit_delay to '10000us'", "set spanner.max_commit_delay TO '9223372036854775807ns'"], + "exampleStatements": [ + "set spanner.max_commit_delay=null", + "set spanner.max_commit_delay = NULL", + "set spanner.max_commit_delay = null ", + "set spanner.max_commit_delay='1s'", + "set spanner.max_commit_delay = '1s'", + "set spanner.max_commit_delay = '1s' ", + "set spanner.max_commit_delay=1000", + "set spanner.max_commit_delay = 1000", + "set spanner.max_commit_delay = 1000 ", + "set spanner.max_commit_delay='100ms'", + "set spanner.max_commit_delay to '10000us'", + "set spanner.max_commit_delay TO '9223372036854775807ns'"], "setStatement": { "propertyName": "SPANNER.MAX_COMMIT_DELAY", "separator": "(?:=|\\s+TO\\s+)", - "allowedValues": "('(\\d{1,19})(s|ms|us|ns)'|NULL)", + "allowedValues": "('(\\d{1,19})(s|ms|us|ns)'|\\d{1,19}|NULL)", "converterName": "ClientSideStatementValueConverters$DurationConverter" } }, @@ -679,6 +722,21 @@ "converterName": "ClientSideStatementValueConverters$StringValueConverter" } }, + { + "name": "SET SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS =|TO TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_EXCLUDE_TXN_FROM_CHANGE_STREAMS", + "regex": "(?is)\\A\\s*set\\s+spanner\\.exclude_txn_from_change_streams(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetReturnCommitStats", + "exampleStatements": ["set spanner.exclude_txn_from_change_streams = true", "set spanner.exclude_txn_from_change_streams = false", "set spanner.exclude_txn_from_change_streams to true", "set spanner.exclude_txn_from_change_streams to false"], + "setStatement": { + "propertyName": "SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, { 
"name": "SET SPANNER.RPC_PRIORITY =|TO 'HIGH'|'MEDIUM'|'LOW'|'NULL'", "executorName": "ClientSideStatementSetExecutor", @@ -740,6 +798,21 @@ "converterName": "ClientSideStatementValueConverters$BooleanConverter" } }, + { + "name": "SET SPANNER.KEEP_TRANSACTION_ALIVE = TRUE|FALSE", + "executorName": "ClientSideStatementSetExecutor", + "resultType": "NO_RESULT", + "statementType": "SET_KEEP_TRANSACTION_ALIVE", + "regex": "(?is)\\A\\s*set\\s+spanner\\.keep_transaction_alive(?:\\s*=\\s*|\\s+to\\s+)(.*)\\z", + "method": "statementSetKeepTransactionAlive", + "exampleStatements": ["set spanner.keep_transaction_alive = true", "set spanner.keep_transaction_alive = false", "set spanner.keep_transaction_alive to true", "set spanner.keep_transaction_alive to false"], + "setStatement": { + "propertyName": "SPANNER.KEEP_TRANSACTION_ALIVE", + "separator": "(?:=|\\s+TO\\s+)", + "allowedValues": "(TRUE|FALSE)", + "converterName": "ClientSideStatementValueConverters$BooleanConverter" + } + }, { "name": "SHOW [VARIABLE] SPANNER.DATA_BOOST_ENABLED", "executorName": "ClientSideStatementNoParamExecutor", diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractMockServerTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractMockServerTest.java index bcc455ff9b1..76d13e73869 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractMockServerTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractMockServerTest.java @@ -18,9 +18,21 @@ import com.google.api.gax.grpc.testing.LocalChannelProvider; import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.admin.database.v1.MockDatabaseAdminImpl; +import com.google.cloud.spanner.admin.instance.v1.MockInstanceAdminImpl; +import com.google.longrunning.GetOperationRequest; +import com.google.longrunning.Operation; +import com.google.longrunning.OperationsGrpc.OperationsImplBase; +import com.google.protobuf.Any; +import 
com.google.protobuf.Empty; +import com.google.rpc.Code; +import com.google.rpc.Status; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; import io.grpc.Server; import io.grpc.inprocess.InProcessServerBuilder; +import io.grpc.stub.StreamObserver; import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -28,6 +40,9 @@ abstract class AbstractMockServerTest { protected static MockSpannerServiceImpl mockSpanner; + public static MockInstanceAdminImpl mockInstanceAdmin; + public static MockDatabaseAdminImpl mockDatabaseAdmin; + public static OperationsImplBase mockOperations; protected static Server server; protected static LocalChannelProvider channelProvider; @@ -37,9 +52,40 @@ abstract class AbstractMockServerTest { public static void startMockServer() throws IOException { mockSpanner = new MockSpannerServiceImpl(); mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. 
+ mockInstanceAdmin = new MockInstanceAdminImpl(); + mockDatabaseAdmin = new MockDatabaseAdminImpl(); + mockOperations = + new OperationsImplBase() { + AtomicBoolean done = new AtomicBoolean(false); + + @Override + public void getOperation( + GetOperationRequest request, + StreamObserver responseObserver) { + responseObserver.onNext( + Operation.newBuilder() + .setDone(done.getAndSet(!done.get())) + .setName(request.getName()) + .setMetadata( + Any.pack( + UpdateDatabaseDdlMetadata.newBuilder() + .setDatabase("projects/proj/instances/inst/databases/db") + .build())) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .build()); + responseObserver.onCompleted(); + } + }; String uniqueName = InProcessServerBuilder.generateName(); - server = InProcessServerBuilder.forName(uniqueName).addService(mockSpanner).build().start(); + server = + InProcessServerBuilder.forName(uniqueName) + .addService(mockSpanner) + .addService(mockInstanceAdmin) + .addService(mockDatabaseAdmin) + .addService(mockOperations) + .build() + .start(); channelProvider = LocalChannelProvider.create(uniqueName); } @@ -67,4 +113,37 @@ public void cleanup() { mockSpanner.reset(); mockSpanner.removeAllExecutionTimes(); } + + void addUpdateDdlResponse() { + mockDatabaseAdmin.addResponse( + Operation.newBuilder() + .setMetadata( + Any.pack( + UpdateDatabaseDdlMetadata.newBuilder() + .setDatabase("projects/proj/instances/inst/databases/db") + .build())) + .setName("projects/proj/instances/inst/databases/db/operations/1") + .setDone(false) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .build()); + } + + void addUpdateDdlError() { + mockDatabaseAdmin.addResponse( + Operation.newBuilder() + .setMetadata( + Any.pack( + UpdateDatabaseDdlMetadata.newBuilder() + .setDatabase("projects/proj/instances/inst/databases/db") + .build())) + .setName("projects/proj/instances/inst/databases/db/operations/1") + .setDone(true) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .setError( + 
Status.newBuilder() + .setCode(Code.FAILED_PRECONDITION_VALUE) + .setMessage("test error") + .build()) + .build()); + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractReadContextTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractReadContextTest.java index 65f27d55810..ce7d6b300d1 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractReadContextTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AbstractReadContextTest.java @@ -33,8 +33,11 @@ import com.google.spanner.v1.ExecuteSqlRequest; import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ReadRequest.OrderBy; import com.google.spanner.v1.RequestOptions; import com.google.spanner.v1.RequestOptions.Priority; +import com.google.spanner.v1.SessionName; import com.google.spanner.v1.TransactionSelector; import java.util.ArrayList; import java.util.Collection; @@ -223,6 +226,21 @@ public void testGetExecuteSqlRequestBuilderWithDataBoost() { assertTrue(request.getDataBoostEnabled()); } + @Test + public void testGetReadRequestBuilderWithOrderBy() { + ReadRequest request = + ReadRequest.newBuilder() + .setSession( + SessionName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]").toString()) + .setTransaction(TransactionSelector.newBuilder().build()) + .setTable("table110115790") + .setIndex("index100346066") + .addAllColumns(new ArrayList()) + .setOrderByValue(2) + .build(); + assertEquals(OrderBy.ORDER_BY_NO_ORDER, request.getOrderBy()); + } + @Test public void testGetExecuteBatchDmlRequestBuilderWithPriority() { ExecuteBatchDmlRequest.Builder request = diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncRunnerTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncRunnerTest.java index a209ff07112..d659e149282 100644 
--- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncRunnerTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncRunnerTest.java @@ -229,7 +229,7 @@ public void asyncRunnerUpdateAbortedWithoutGettingResult() throws Exception { public void asyncRunnerCommitFails() throws Exception { mockSpanner.setCommitExecutionTime( SimulatedExecutionTime.ofException( - Status.RESOURCE_EXHAUSTED + Status.INVALID_ARGUMENT .withDescription("mutation limit exceeded") .asRuntimeException())); AsyncRunner runner = client().runAsync(); @@ -245,7 +245,7 @@ public void asyncRunnerCommitFails() throws Exception { ExecutionException e = assertThrows(ExecutionException.class, () -> updateCount.get()); assertThat(e.getCause()).isInstanceOf(SpannerException.class); SpannerException se = (SpannerException) e.getCause(); - assertThat(se.getErrorCode()).isEqualTo(ErrorCode.RESOURCE_EXHAUSTED); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); assertThat(se.getMessage()).contains("mutation limit exceeded"); } @@ -432,7 +432,7 @@ public void asyncRunnerBatchUpdateAbortedWithoutGettingResult() throws Exception public void asyncRunnerWithBatchUpdateCommitFails() throws Exception { mockSpanner.setCommitExecutionTime( SimulatedExecutionTime.ofException( - Status.RESOURCE_EXHAUSTED + Status.INVALID_ARGUMENT .withDescription("mutation limit exceeded") .asRuntimeException())); AsyncRunner runner = client().runAsync(); @@ -448,7 +448,7 @@ public void asyncRunnerWithBatchUpdateCommitFails() throws Exception { ExecutionException e = assertThrows(ExecutionException.class, () -> updateCount.get()); assertThat(e.getCause()).isInstanceOf(SpannerException.class); SpannerException se = (SpannerException) e.getCause(); - assertThat(se.getErrorCode()).isEqualTo(ErrorCode.RESOURCE_EXHAUSTED); + assertThat(se.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); assertThat(se.getMessage()).contains("mutation limit exceeded"); } diff --git 
a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncTransactionManagerTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncTransactionManagerTest.java index e96ea3a6a4b..9b05a18d714 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncTransactionManagerTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/AsyncTransactionManagerTest.java @@ -46,6 +46,7 @@ import com.google.common.collect.Range; import com.google.common.util.concurrent.MoreExecutors; import com.google.protobuf.AbstractMessage; +import com.google.protobuf.GeneratedMessageV3; import com.google.spanner.v1.BatchCreateSessionsRequest; import com.google.spanner.v1.BeginTransactionRequest; import com.google.spanner.v1.CommitRequest; @@ -345,33 +346,22 @@ public void asyncTransactionManagerFireAndForgetInvalidUpdate() throws Exception } } } + ImmutableList> expectedRequests = + ImmutableList.of( + BatchCreateSessionsRequest.class, + // The first update that fails. This will cause a transaction retry. + ExecuteSqlRequest.class, + // The retry will use an explicit BeginTransaction call. + BeginTransactionRequest.class, + // The first update will again fail, but now there is a transaction id, so the + // transaction can continue. + ExecuteSqlRequest.class, + ExecuteSqlRequest.class, + CommitRequest.class); if (isMultiplexedSessionsEnabled()) { - assertThat(mockSpanner.getRequestTypes()) - .containsExactly( - CreateSessionRequest.class, - BatchCreateSessionsRequest.class, - // The first update that fails. This will cause a transaction retry. - ExecuteSqlRequest.class, - // The retry will use an explicit BeginTransaction call. - BeginTransactionRequest.class, - // The first update will again fail, but now there is a transaction id, so the - // transaction can continue. 
- ExecuteSqlRequest.class, - ExecuteSqlRequest.class, - CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()).containsAtLeastElementsIn(expectedRequests); } else { - assertThat(mockSpanner.getRequestTypes()) - .containsExactly( - BatchCreateSessionsRequest.class, - // The first update that fails. This will cause a transaction retry. - ExecuteSqlRequest.class, - // The retry will use an explicit BeginTransaction call. - BeginTransactionRequest.class, - // The first update will again fail, but now there is a transaction id, so the - // transaction can continue. - ExecuteSqlRequest.class, - ExecuteSqlRequest.class, - CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()).containsExactlyElementsIn(expectedRequests); } } @@ -531,7 +521,7 @@ public void asyncTransactionManagerUpdateAbortedWithoutGettingResult() throws Ex public void asyncTransactionManagerCommitFails() throws Exception { mockSpanner.setCommitExecutionTime( SimulatedExecutionTime.ofException( - Status.RESOURCE_EXHAUSTED + Status.INVALID_ARGUMENT .withDescription("mutation limit exceeded") .asRuntimeException())); try (AsyncTransactionManager mgr = client().transactionManagerAsync()) { @@ -545,7 +535,7 @@ public void asyncTransactionManagerCommitFails() throws Exception { AsyncTransactionManagerHelper.executeUpdateAsync(UPDATE_STATEMENT), executor) .commitAsync())); - assertThat(e.getErrorCode()).isEqualTo(ErrorCode.RESOURCE_EXHAUSTED); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); assertThat(e.getMessage()).contains("mutation limit exceeded"); } } @@ -681,21 +671,16 @@ public void asyncTransactionManagerFireAndForgetInvalidBatchUpdate() throws Exce } } } + ImmutableList> expectedRequests = + ImmutableList.of( + BatchCreateSessionsRequest.class, + ExecuteBatchDmlRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); if (isMultiplexedSessionsEnabled()) { - assertThat(mockSpanner.getRequestTypes()) - .containsExactly( - 
CreateSessionRequest.class, - BatchCreateSessionsRequest.class, - ExecuteBatchDmlRequest.class, - ExecuteBatchDmlRequest.class, - CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()).containsAtLeastElementsIn(expectedRequests); } else { - assertThat(mockSpanner.getRequestTypes()) - .containsExactly( - BatchCreateSessionsRequest.class, - ExecuteBatchDmlRequest.class, - ExecuteBatchDmlRequest.class, - CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()).containsExactlyElementsIn(expectedRequests); } } @@ -729,23 +714,17 @@ public void asyncTransactionManagerBatchUpdateAborted() throws Exception { assertThat(attempt.get()).isEqualTo(2); // There should only be 1 CommitRequest, as the first attempt should abort already after the // ExecuteBatchDmlRequest. + ImmutableList> expectedRequests = + ImmutableList.of( + BatchCreateSessionsRequest.class, + ExecuteBatchDmlRequest.class, + BeginTransactionRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); if (isMultiplexedSessionsEnabled()) { - assertThat(mockSpanner.getRequestTypes()) - .containsExactly( - CreateSessionRequest.class, - BatchCreateSessionsRequest.class, - ExecuteBatchDmlRequest.class, - BeginTransactionRequest.class, - ExecuteBatchDmlRequest.class, - CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()).containsAtLeastElementsIn(expectedRequests); } else { - assertThat(mockSpanner.getRequestTypes()) - .containsExactly( - BatchCreateSessionsRequest.class, - ExecuteBatchDmlRequest.class, - BeginTransactionRequest.class, - ExecuteBatchDmlRequest.class, - CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()).containsExactlyElementsIn(expectedRequests); } } @@ -777,23 +756,17 @@ public void asyncTransactionManagerBatchUpdateAbortedBeforeFirstStatement() thro assertThat(attempt.get()).isEqualTo(2); // There should only be 1 CommitRequest, as the first attempt should abort already after the // ExecuteBatchDmlRequest. 
+ ImmutableList> expectedRequests = + ImmutableList.of( + BatchCreateSessionsRequest.class, + ExecuteBatchDmlRequest.class, + BeginTransactionRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); if (isMultiplexedSessionsEnabled()) { - assertThat(mockSpanner.getRequestTypes()) - .containsExactly( - CreateSessionRequest.class, - BatchCreateSessionsRequest.class, - ExecuteBatchDmlRequest.class, - BeginTransactionRequest.class, - ExecuteBatchDmlRequest.class, - CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()).containsAtLeastElementsIn(expectedRequests); } else { - assertThat(mockSpanner.getRequestTypes()) - .containsExactly( - BatchCreateSessionsRequest.class, - ExecuteBatchDmlRequest.class, - BeginTransactionRequest.class, - ExecuteBatchDmlRequest.class, - CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()).containsExactlyElementsIn(expectedRequests); } } @@ -843,25 +816,18 @@ public void asyncTransactionManagerWithBatchUpdateCommitAborted() throws Excepti } finally { mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); } + ImmutableList> expectedRequests = + ImmutableList.of( + BatchCreateSessionsRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class, + BeginTransactionRequest.class, + ExecuteBatchDmlRequest.class, + CommitRequest.class); if (isMultiplexedSessionsEnabled()) { - assertThat(mockSpanner.getRequestTypes()) - .containsExactly( - CreateSessionRequest.class, - BatchCreateSessionsRequest.class, - ExecuteBatchDmlRequest.class, - CommitRequest.class, - BeginTransactionRequest.class, - ExecuteBatchDmlRequest.class, - CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()).containsAtLeastElementsIn(expectedRequests); } else { - assertThat(mockSpanner.getRequestTypes()) - .containsExactly( - BatchCreateSessionsRequest.class, - ExecuteBatchDmlRequest.class, - CommitRequest.class, - BeginTransactionRequest.class, - ExecuteBatchDmlRequest.class, - 
CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()).containsExactlyElementsIn(expectedRequests); } } @@ -925,10 +891,10 @@ public void asyncTransactionManagerBatchUpdateAbortedWithoutGettingResult() thro } @Test - public void asyncTransactionManagerWithBatchUpdateCommitFails() throws Exception { + public void asyncTransactionManagerWithBatchUpdateCommitFails() { mockSpanner.setCommitExecutionTime( SimulatedExecutionTime.ofException( - Status.RESOURCE_EXHAUSTED + Status.INVALID_ARGUMENT .withDescription("mutation limit exceeded") .asRuntimeException())); try (AsyncTransactionManager manager = clientWithEmptySessionPool().transactionManagerAsync()) { @@ -945,20 +911,16 @@ public void asyncTransactionManagerWithBatchUpdateCommitFails() throws Exception ImmutableList.of(UPDATE_STATEMENT, UPDATE_STATEMENT)), executor) .commitAsync())); - assertThat(e.getErrorCode()).isEqualTo(ErrorCode.RESOURCE_EXHAUSTED); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); assertThat(e.getMessage()).contains("mutation limit exceeded"); } + ImmutableList> expectedRequests = + ImmutableList.of( + BatchCreateSessionsRequest.class, ExecuteBatchDmlRequest.class, CommitRequest.class); if (isMultiplexedSessionsEnabled()) { - assertThat(mockSpanner.getRequestTypes()) - .containsExactly( - CreateSessionRequest.class, - BatchCreateSessionsRequest.class, - ExecuteBatchDmlRequest.class, - CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()).containsAtLeastElementsIn(expectedRequests); } else { - assertThat(mockSpanner.getRequestTypes()) - .containsExactly( - BatchCreateSessionsRequest.class, ExecuteBatchDmlRequest.class, CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()).containsExactlyElementsIn(expectedRequests); } } @@ -983,17 +945,13 @@ public void asyncTransactionManagerWaitsUntilAsyncBatchUpdateHasFinished() throw } } } + ImmutableList> expectedRequests = + ImmutableList.of( + BatchCreateSessionsRequest.class, 
ExecuteBatchDmlRequest.class, CommitRequest.class); if (isMultiplexedSessionsEnabled()) { - assertThat(mockSpanner.getRequestTypes()) - .containsExactly( - CreateSessionRequest.class, - BatchCreateSessionsRequest.class, - ExecuteBatchDmlRequest.class, - CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()).containsAtLeastElementsIn(expectedRequests); } else { - assertThat(mockSpanner.getRequestTypes()) - .containsExactly( - BatchCreateSessionsRequest.class, ExecuteBatchDmlRequest.class, CommitRequest.class); + assertThat(mockSpanner.getRequestTypes()).containsExactlyElementsIn(expectedRequests); } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchCreateSessionsTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchCreateSessionsTest.java index c252bb19238..8d359428c77 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchCreateSessionsTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchCreateSessionsTest.java @@ -207,13 +207,13 @@ public void testSpannerReturnsAllAvailableSessionsAndThenNoSessions() } @Test - public void testSpannerReturnsResourceExhausted() throws InterruptedException { + public void testSpannerReturnsFailedPrecondition() throws InterruptedException { int minSessions = 100; int maxSessions = 1000; int expectedSessions; DatabaseClientImpl client; // Make the first BatchCreateSessions return an error. - mockSpanner.addException(Status.RESOURCE_EXHAUSTED.asRuntimeException()); + mockSpanner.addException(Status.FAILED_PRECONDITION.asRuntimeException()); try (Spanner spanner = createSpanner(minSessions, maxSessions)) { // Create a database client which will create a session pool. 
client = diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BuiltInOpenTelemetryMetricsProviderTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BuiltInOpenTelemetryMetricsProviderTest.java new file mode 100644 index 00000000000..43fe97113d0 --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BuiltInOpenTelemetryMetricsProviderTest.java @@ -0,0 +1,66 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class BuiltInOpenTelemetryMetricsProviderTest { + + @Test + public void testGenerateClientHashWithSimpleUid() { + String clientUid = "testClient"; + verifyHash(BuiltInOpenTelemetryMetricsProvider.generateClientHash(clientUid)); + } + + @Test + public void testGenerateClientHashWithEmptyUid() { + String clientUid = ""; + verifyHash(BuiltInOpenTelemetryMetricsProvider.generateClientHash(clientUid)); + } + + @Test + public void testGenerateClientHashWithNullUid() { + String clientUid = null; + verifyHash(BuiltInOpenTelemetryMetricsProvider.generateClientHash(clientUid)); + } + + @Test + public void testGenerateClientHashWithLongUid() { + String clientUid = "aVeryLongUniqueClientIdentifierThatIsUnusuallyLong"; + verifyHash(BuiltInOpenTelemetryMetricsProvider.generateClientHash(clientUid)); + } + + @Test + public void testGenerateClientHashWithSpecialCharacters() { + String clientUid = "273d60f2-5604-42f1-b687-f5f1b975fd07@2316645@test#"; + verifyHash(BuiltInOpenTelemetryMetricsProvider.generateClientHash(clientUid)); + } + + private void verifyHash(String hash) { + // Check if the hash length is 6 + assertEquals(hash.length(), 6); + // Check if the hash is in the range [000000, 0003ff] + long hashValue = Long.parseLong(hash, 16); // Convert hash from hex to decimal + assertTrue(hashValue >= 0 && hashValue <= 0x3FF); + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/CloseSpannerWithOpenResultSetTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/CloseSpannerWithOpenResultSetTest.java new file mode 100644 index 00000000000..8d622579714 --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/CloseSpannerWithOpenResultSetTest.java @@ -0,0 
+1,193 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.cloud.spanner.spi.v1.GapicSpannerRpc; +import com.google.spanner.v1.DeleteSessionRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Status; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.threeten.bp.Duration; + +@RunWith(JUnit4.class) +public class CloseSpannerWithOpenResultSetTest extends AbstractMockServerTest { + + Spanner createSpanner() { + return SpannerOptions.newBuilder() + .setProjectId("p") + 
.setHost(String.format("http://localhost:%d", getPort())) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder().setWaitForMinSessions(Duration.ofSeconds(5L)).build()) + .build() + .getService(); + } + + @BeforeClass + public static void setWatchdogTimeout() { + System.setProperty("com.google.cloud.spanner.watchdogTimeoutSeconds", "1"); + } + + @AfterClass + public static void clearWatchdogTimeout() { + System.clearProperty("com.google.cloud.spanner.watchdogTimeoutSeconds"); + } + + @After + public void cleanup() { + mockSpanner.unfreeze(); + mockSpanner.clearRequests(); + } + + @Test + public void testBatchClient_closedSpannerWithOpenResultSet_streamsAreCancelled() { + Spanner spanner = createSpanner(); + assumeFalse(spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSession()); + + BatchClient client = spanner.getBatchClient(DatabaseId.of("p", "i", "d")); + try (BatchReadOnlyTransaction transaction = + client.batchReadOnlyTransaction(TimestampBound.strong()); + ResultSet resultSet = transaction.executeQuery(SELECT_RANDOM_STATEMENT)) { + mockSpanner.freezeAfterReturningNumRows(1); + // This can sometimes fail, as the mock server may not always actually return the first row. + try { + assertTrue(resultSet.next()); + } catch (SpannerException exception) { + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + return; + } + ((SpannerImpl) spanner).close(1, TimeUnit.MILLISECONDS); + // This should return an error as the stream is cancelled. 
+ SpannerException exception = + assertThrows( + SpannerException.class, + () -> { //noinspection StatementWithEmptyBody + while (resultSet.next()) {} + }); + assertEquals(ErrorCode.CANCELLED, exception.getErrorCode()); + } + } + + @Test + public void testNormalDatabaseClient_closedSpannerWithOpenResultSet_sessionsAreDeleted() + throws Exception { + Spanner spanner = createSpanner(); + assumeFalse(spanner.getOptions().getSessionPoolOptions().getUseMultiplexedSession()); + + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + try (ReadOnlyTransaction transaction = client.readOnlyTransaction(TimestampBound.strong()); + ResultSet resultSet = transaction.executeQuery(SELECT_RANDOM_STATEMENT)) { + mockSpanner.freezeAfterReturningNumRows(1); + // This can sometimes fail, as the mock server may not always actually return the first row. + try { + assertTrue(resultSet.next()); + } catch (SpannerException exception) { + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + return; + } + List executeSqlRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(SELECT_RANDOM_STATEMENT.getSql())) + .collect(Collectors.toList()); + assertEquals(1, executeSqlRequests.size()); + ExecutorService service = Executors.newSingleThreadExecutor(); + service.submit(spanner::close); + // Verify that the session that is used by this transaction is deleted. + // That will automatically cancel the query. 
+ mockSpanner.waitForRequestsToContain( + request -> + request instanceof DeleteSessionRequest + && ((DeleteSessionRequest) request) + .getName() + .equals(executeSqlRequests.get(0).getSession()), + /*timeoutMillis=*/ 1000L); + service.shutdownNow(); + } + } + + @Test + public void testStreamsAreCleanedUp() throws Exception { + String invalidSql = "select * from foo"; + Statement invalidStatement = Statement.of(invalidSql); + mockSpanner.putStatementResult( + StatementResult.exception( + invalidStatement, + Status.NOT_FOUND.withDescription("Table not found: foo").asRuntimeException())); + int numThreads = 16; + int numQueries = 32; + try (Spanner spanner = createSpanner()) { + BatchClient client = spanner.getBatchClient(DatabaseId.of("p", "i", "d")); + ExecutorService service = Executors.newFixedThreadPool(numThreads); + List> futures = new ArrayList<>(numQueries); + for (int n = 0; n < numQueries; n++) { + futures.add( + service.submit( + () -> { + try (BatchReadOnlyTransaction transaction = + client.batchReadOnlyTransaction(TimestampBound.strong())) { + if (ThreadLocalRandom.current().nextInt(10) < 2) { + try (ResultSet resultSet = transaction.executeQuery(invalidStatement)) { + SpannerException exception = + assertThrows(SpannerException.class, resultSet::next); + assertEquals(ErrorCode.NOT_FOUND, exception.getErrorCode()); + } + } else { + try (ResultSet resultSet = + transaction.executeQuery(SELECT_RANDOM_STATEMENT)) { + while (resultSet.next()) { + assertNotNull(resultSet.getCurrentRowAsStruct()); + } + } + } + } + })); + } + service.shutdown(); + for (Future fut : futures) { + fut.get(); + } + assertTrue(service.awaitTermination(1L, TimeUnit.MINUTES)); + // Verify that all response observers have been unregistered. 
+ assertEquals( + 0, ((GapicSpannerRpc) ((SpannerImpl) spanner).getRpc()).getNumActiveResponseObservers()); + } + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/CompositeTracerTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/CompositeTracerTest.java new file mode 100644 index 00000000000..dfb7b252268 --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/CompositeTracerTest.java @@ -0,0 +1,277 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.gax.tracing.ApiTracer; +import com.google.api.gax.tracing.ApiTracer.Scope; +import com.google.api.gax.tracing.MetricsTracer; +import com.google.common.collect.ImmutableList; +import com.google.spanner.v1.ReadRequest; +import java.lang.reflect.Method; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; +import org.threeten.bp.Duration; + +@RunWith(JUnit4.class) +public class CompositeTracerTest { + @Rule public final MockitoRule mockitoRule = MockitoJUnit.rule(); + + @Mock private ApiTracer child1; + @Mock private ApiTracer child2; + @Mock private OpenTelemetryApiTracer child3; + @Mock private MetricsTracer child4; + + private CompositeTracer compositeTracer; + + @Before + public void setup() { + compositeTracer = new CompositeTracer(ImmutableList.of(child1, child2, child3, child4)); + } + + @Test + public void testInScope() { + Scope scope1 = mock(Scope.class); + when(child1.inScope()).thenReturn(scope1); + + Scope scope2 = mock(Scope.class); + when(child2.inScope()).thenReturn(scope2); + + Scope scope3 = mock(Scope.class); + when(child3.inScope()).thenReturn(scope3); + + Scope scope4 = mock(Scope.class); + when(child4.inScope()).thenReturn(scope4); + + Scope parentScope = compositeTracer.inScope(); + + parentScope.close(); + verify(scope1, times(1)).close(); + verify(scope2, times(1)).close(); + verify(scope3, times(1)).close(); + verify(scope4, times(1)).close(); + } + + @Test + public void 
testOperationSucceeded() { + compositeTracer.operationSucceeded(); + verify(child1, times(1)).operationSucceeded(); + verify(child2, times(1)).operationSucceeded(); + verify(child3, times(1)).operationSucceeded(); + verify(child4, times(1)).operationSucceeded(); + } + + @Test + public void testOperationCancelled() { + compositeTracer.operationCancelled(); + verify(child1, times(1)).operationCancelled(); + verify(child2, times(1)).operationCancelled(); + verify(child3, times(1)).operationCancelled(); + verify(child4, times(1)).operationCancelled(); + } + + @Test + public void testOperationFailed() { + RuntimeException error = new RuntimeException(); + compositeTracer.operationFailed(error); + verify(child1, times(1)).operationFailed(error); + verify(child2, times(1)).operationFailed(error); + verify(child3, times(1)).operationFailed(error); + verify(child4, times(1)).operationFailed(error); + } + + @Test + public void testConnectionSelected() { + compositeTracer.connectionSelected("connection-one"); + verify(child1, times(1)).connectionSelected("connection-one"); + verify(child2, times(1)).connectionSelected("connection-one"); + verify(child3, times(1)).connectionSelected("connection-one"); + verify(child4, times(1)).connectionSelected("connection-one"); + } + + @Test + public void testAttemptStarted() { + ReadRequest request = ReadRequest.getDefaultInstance(); + compositeTracer.attemptStarted(request, 3); + verify(child1, times(1)).attemptStarted(request, 3); + verify(child2, times(1)).attemptStarted(request, 3); + verify(child3, times(1)).attemptStarted(request, 3); + verify(child4, times(1)).attemptStarted(request, 3); + } + + @Test + public void testAttemptSucceeded() { + compositeTracer.attemptSucceeded(); + verify(child1, times(1)).attemptSucceeded(); + verify(child2, times(1)).attemptSucceeded(); + verify(child3, times(1)).attemptSucceeded(); + verify(child4, times(1)).attemptSucceeded(); + } + + @Test + public void testAttemptCancelled() { + 
compositeTracer.attemptCancelled(); + verify(child1, times(1)).attemptCancelled(); + verify(child2, times(1)).attemptCancelled(); + verify(child3, times(1)).attemptCancelled(); + verify(child4, times(1)).attemptCancelled(); + } + + @Test + public void testAttemptFailed() { + RuntimeException error = new RuntimeException(); + Duration delay = Duration.ofMillis(10); + compositeTracer.attemptFailed(error, delay); + verify(child1, times(1)).attemptFailed(error, delay); + verify(child2, times(1)).attemptFailed(error, delay); + verify(child3, times(1)).attemptFailed(error, delay); + verify(child4, times(1)).attemptFailed(error, delay); + } + + @Test + public void testAttemptFailedRetriesExhausted() { + RuntimeException error = new RuntimeException(); + compositeTracer.attemptFailedRetriesExhausted(error); + verify(child1, times(1)).attemptFailedRetriesExhausted(error); + verify(child2, times(1)).attemptFailedRetriesExhausted(error); + verify(child3, times(1)).attemptFailedRetriesExhausted(error); + verify(child4, times(1)).attemptFailedRetriesExhausted(error); + } + + @Test + public void testAttemptPermanentFailure() { + RuntimeException error = new RuntimeException(); + compositeTracer.attemptPermanentFailure(error); + verify(child1, times(1)).attemptPermanentFailure(error); + verify(child2, times(1)).attemptPermanentFailure(error); + verify(child3, times(1)).attemptPermanentFailure(error); + verify(child4, times(1)).attemptPermanentFailure(error); + } + + @Test + public void testLroStartFailed() { + RuntimeException error = new RuntimeException(); + compositeTracer.lroStartFailed(error); + verify(child1, times(1)).lroStartFailed(error); + verify(child2, times(1)).lroStartFailed(error); + verify(child3, times(1)).lroStartFailed(error); + verify(child4, times(1)).lroStartFailed(error); + } + + @Test + public void testLroStartSucceeded() { + compositeTracer.lroStartSucceeded(); + verify(child1, times(1)).lroStartSucceeded(); + verify(child2, times(1)).lroStartSucceeded(); 
+ verify(child3, times(1)).lroStartSucceeded(); + verify(child4, times(1)).lroStartSucceeded(); + } + + @Test + public void testResponseReceived() { + compositeTracer.responseReceived(); + verify(child1, times(1)).responseReceived(); + verify(child2, times(1)).responseReceived(); + verify(child3, times(1)).responseReceived(); + verify(child4, times(1)).responseReceived(); + } + + @Test + public void testRequestSent() { + compositeTracer.requestSent(); + verify(child1, times(1)).requestSent(); + verify(child2, times(1)).requestSent(); + verify(child3, times(1)).requestSent(); + verify(child4, times(1)).requestSent(); + } + + @Test + public void testBatchRequestSent() { + compositeTracer.batchRequestSent(2, 20); + verify(child1, times(1)).batchRequestSent(2, 20); + verify(child2, times(1)).batchRequestSent(2, 20); + verify(child3, times(1)).batchRequestSent(2, 20); + verify(child4, times(1)).batchRequestSent(2, 20); + } + + @Test + public void testMethodsOverrideMetricsTracer() { + Method[] metricsTracerMethods = MetricsTracer.class.getDeclaredMethods(); + Method[] compositeTracerMethods = CompositeTracer.class.getDeclaredMethods(); + + List visibleForTestingMethods = Arrays.asList("getAttributes", "extractStatus"); + + Set compositeMethodsSet = new HashSet<>(Arrays.asList(compositeTracerMethods)); + + for (Method metricsMethod : metricsTracerMethods) { + if (!visibleForTestingMethods.contains(metricsMethod.getName()) + && !containsMethod(compositeMethodsSet, metricsMethod)) { + throw new AssertionError("Method not found in compositeTracerMethods: " + metricsMethod); + } + } + } + + @Test + public void testMethodsOverrideOpenTelemetryTracer() { + + Method[] compositeTracerMethods = CompositeTracer.class.getDeclaredMethods(); + + List openTelemetryTracerMethods = + Arrays.stream(OpenTelemetryApiTracer.class.getDeclaredMethods()) + .filter(method -> java.lang.reflect.Modifier.isPublic(method.getModifiers())) + .collect(Collectors.toList()); + + Set compositeMethodsSet 
= new HashSet<>(Arrays.asList(compositeTracerMethods)); + + for (Method metricsMethod : openTelemetryTracerMethods) { + if (!containsMethod(compositeMethodsSet, metricsMethod)) { + throw new AssertionError("Method not found in compositeTracerMethods: " + metricsMethod); + } + } + } + + private boolean compareMethods(Method actual, Method expected) { + return actual.getName().equals(expected.getName()) + && Arrays.equals(actual.getParameterTypes(), expected.getParameterTypes()) + && actual.getModifiers() == expected.getModifiers() + && actual.getReturnType().equals(expected.getReturnType()); + } + + public boolean containsMethod(Set methodSet, Method method) { + for (Method m : methodSet) { + if (compareMethods(m, method)) { + return true; + } + } + return false; + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseClientImplTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseClientImplTest.java index 41da46a56a8..62a10c0adb4 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseClientImplTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseClientImplTest.java @@ -52,6 +52,7 @@ import com.google.cloud.spanner.AsyncTransactionManager.TransactionContextFuture; import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Options.RpcOrderBy; import com.google.cloud.spanner.Options.RpcPriority; import com.google.cloud.spanner.Options.TransactionOption; import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; @@ -89,6 +90,7 @@ import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.ReadRequest.OrderBy; import com.google.spanner.v1.RequestOptions.Priority; import 
com.google.spanner.v1.ResultSetMetadata; import com.google.spanner.v1.ResultSetStats; @@ -1722,6 +1724,27 @@ public void testExecuteReadWithTag() { assertThat(request.getRequestOptions().getTransactionTag()).isEmpty(); } + @Test + public void testExecuteReadWithOrderByOption() { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + try (ResultSet resultSet = + client + .singleUse() + .read( + READ_TABLE_NAME, + KeySet.singleKey(Key.of(1L)), + READ_COLUMN_NAMES, + Options.orderBy(RpcOrderBy.NO_ORDER))) { + consumeResults(resultSet); + } + + List requests = mockSpanner.getRequestsOfType(ReadRequest.class); + assertThat(requests).hasSize(1); + ReadRequest request = requests.get(0); + assertEquals(OrderBy.ORDER_BY_NO_ORDER, request.getOrderBy()); + } + @Test public void testExecuteReadWithDirectedReadOptions() { DatabaseClient client = @@ -3836,7 +3859,8 @@ public void testBatchCreateSessionsFailure_shouldNotPropagateToCloseMethod() { try { // Simulate session creation failures on the backend. mockSpanner.setBatchCreateSessionsExecutionTime( - SimulatedExecutionTime.ofStickyException(Status.RESOURCE_EXHAUSTED.asRuntimeException())); + SimulatedExecutionTime.ofStickyException( + Status.FAILED_PRECONDITION.asRuntimeException())); DatabaseClient client = spannerWithEmptySessionPool.getDatabaseClient( DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); @@ -3844,7 +3868,7 @@ public void testBatchCreateSessionsFailure_shouldNotPropagateToCloseMethod() { // non-blocking, and any exceptions will be delayed until actual query execution. 
try (ResultSet rs = client.singleUse().executeQuery(SELECT1)) { SpannerException e = assertThrows(SpannerException.class, rs::next); - assertThat(e.getErrorCode()).isEqualTo(ErrorCode.RESOURCE_EXHAUSTED); + assertThat(e.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); } } finally { mockSpanner.setBatchCreateSessionsExecutionTime(SimulatedExecutionTime.none()); @@ -3883,7 +3907,7 @@ public void testReadWriteTransaction_usesOptions() { TransactionOption option = mock(TransactionOption.class); TraceWrapper traceWrapper = - new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer("")); + new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer(""), false); DatabaseClientImpl client = new DatabaseClientImpl(pool, traceWrapper); client.readWriteTransaction(option); @@ -5082,8 +5106,7 @@ public void testSessionPoolExhaustedError_containsStackTraces() { // Deliberately leak 4 sessions. for (int i = 0; i < 4; i++) { // Get a transaction manager without doing anything with it. This will reserve a session - // from - // the pool, but not increase the number of sessions marked as in use. + // from the pool, but not increase the number of sessions marked as in use. transactions.add(client.transactionManager()); } // Trying to get yet another transaction will fail. @@ -5111,6 +5134,19 @@ public void testSessionPoolExhaustedError_containsStackTraces() { for (TransactionManager transaction : transactions) { transaction.close(); } + // Wait up to 100 milliseconds for the sessions to actually all be in the pool, as there are + // two possible ways that the session pool handles the above: + // 1. The pool starts to create 4 sessions. + // 2. It then hands out whatever session has been created to one of the waiters. + // 3. The waiting process then executes its transaction, and when finished, the session is + // given to any other process waiting at that moment. 
+ // The above means that although there will always be 4 sessions created, it could in theory + // be that not all of them are used, as it could be that a transaction finishes before the + // creation of session 2, 3, or 4 finished, and then the existing session is re-used. + Stopwatch watch = Stopwatch.createStarted(); + while (pool.getNumberOfSessionsInPool() < 4 && watch.elapsed(TimeUnit.MILLISECONDS) < 100) { + Thread.yield(); + } // Closing the transactions should return the sessions to the pool. assertEquals(4, pool.getNumberOfSessionsInPool()); } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/FailOnOverkillTraceComponentImpl.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/FailOnOverkillTraceComponentImpl.java index 9e54af9c4e7..14f575ef3d9 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/FailOnOverkillTraceComponentImpl.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/FailOnOverkillTraceComponentImpl.java @@ -48,9 +48,11 @@ import java.util.Collections; import java.util.EnumSet; import java.util.LinkedHashMap; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Random; +import javax.annotation.Nullable; /** * Simple {@link TraceComponent} implementation that will throw an exception if a {@link Span} is @@ -65,6 +67,7 @@ public class FailOnOverkillTraceComponentImpl extends TraceComponent { private final TraceConfig traceConfig = new TestTraceConfig(); private static final Map spans = Collections.synchronizedMap(new LinkedHashMap<>()); + private static final List spanList = Collections.synchronizedList(new LinkedList<>()); private static final List annotations = new ArrayList<>(); @@ -72,22 +75,37 @@ public static class TestSpan extends Span { @GuardedBy("this") private volatile boolean ended = false; - private String spanName; + private final String spanName; + + private Status status; + + private final List annotations = 
Collections.synchronizedList(new ArrayList<>()); private TestSpan(String spanName, SpanContext context, EnumSet options) { super(context, options); this.spanName = spanName; spans.put(this.spanName, false); + spanList.add(this); + } + + public String getSpanName() { + return this.spanName; + } + + public List getAnnotations() { + return this.annotations; } @Override public void addAnnotation(String description, Map attributes) { - annotations.add(description); + FailOnOverkillTraceComponentImpl.annotations.add(description); + this.annotations.add(description); } @Override public void addAnnotation(Annotation annotation) { - annotations.add(annotation.getDescription()); + FailOnOverkillTraceComponentImpl.annotations.add(annotation.getDescription()); + this.annotations.add(annotation.getDescription()); } @Override @@ -99,8 +117,15 @@ public void addAttributes(Map attributes) {} @Override public void addLink(Link link) {} + @Nullable + public Status getStatus() { + return this.status; + } + @Override - public void setStatus(Status status) {} + public void setStatus(Status status) { + this.status = status; + } @Override public void end(EndSpanOptions options) { @@ -108,8 +133,10 @@ public void end(EndSpanOptions options) { if (ended) { throw new IllegalStateException(this.spanName + " already ended"); } - spans.put(this.spanName, true); - ended = true; + if (spans.containsKey(this.spanName)) { + spans.put(this.spanName, true); + ended = true; + } } } } @@ -229,12 +256,17 @@ Map getSpans() { return spans; } + List getTestSpans() { + return spanList; + } + List getAnnotations() { return annotations; } void clearSpans() { spans.clear(); + spanList.clear(); } void clearAnnotations() { diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/GrpcResultSetTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/GrpcResultSetTest.java index 2051e006d81..62336163eaf 100644 --- 
a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/GrpcResultSetTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/GrpcResultSetTest.java @@ -81,7 +81,7 @@ public void onDone(boolean withBeginTransaction) {} @Before public void setUp() { - stream = new GrpcStreamIterator(10); + stream = new GrpcStreamIterator(10, /*cancelQueryWhenClientIsClosed=*/ false); stream.setCall( new SpannerRpc.StreamingCall() { @Override diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ITSessionPoolIntegrationTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ITSessionPoolIntegrationTest.java index be9f6841f20..df29aac9170 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ITSessionPoolIntegrationTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ITSessionPoolIntegrationTest.java @@ -96,7 +96,7 @@ public ScheduledExecutorService get() { } }, ((SpannerImpl) env.getTestHelper().getClient()).getSessionClient(db.getId()), - new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer("")), + new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer(""), false), OpenTelemetry.noop()); } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminClientImplTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminClientImplTest.java index 58373bcca0b..558efff7487 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminClientImplTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminClientImplTest.java @@ -280,12 +280,17 @@ public void createInstance() throws Exception { when(rpc.createInstance( "projects/" + PROJECT_ID, INSTANCE_ID, - getInstanceProto().toBuilder().setProcessingUnits(0).build())) + getInstanceProto() + .toBuilder() + .setProcessingUnits(0) + 
.setEdition(com.google.spanner.admin.instance.v1.Instance.Edition.ENTERPRISE_PLUS) + .build())) .thenReturn(rawOperationFuture); OperationFuture op = client.createInstance( InstanceInfo.newBuilder(InstanceId.of(PROJECT_ID, INSTANCE_ID)) .setInstanceConfigId(InstanceConfigId.of(PROJECT_ID, CONFIG_ID)) + .setEdition(com.google.spanner.admin.instance.v1.Instance.Edition.ENTERPRISE_PLUS) .setNodeCount(1) .build()); assertThat(op.isDone()).isTrue(); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTestEnv.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTestEnv.java index 4d4f639d3d9..4593c04cc18 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTestEnv.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTestEnv.java @@ -17,6 +17,7 @@ package com.google.cloud.spanner; import static com.google.common.base.Preconditions.checkState; +import static org.junit.Assume.assumeFalse; import com.google.api.client.util.ExponentialBackOff; import com.google.api.gax.longrunning.OperationFuture; @@ -26,6 +27,7 @@ import com.google.cloud.spanner.testing.RemoteSpannerHelper; import com.google.common.collect.Iterators; import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import java.util.Objects; import java.util.Random; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -85,13 +87,25 @@ protected void initializeConfig() throw new NullPointerException("Property " + TEST_ENV_CONFIG_CLASS_NAME + " needs to be set"); } Class configClass; + if (EmulatorSpannerHelper.isUsingEmulator()) { + // Make sure that we use an owned instance on the emulator. 
+ System.setProperty(TEST_INSTANCE_PROPERTY, ""); + } configClass = (Class) Class.forName(CONFIG_CLASS); config = configClass.newInstance(); } + boolean isCloudDevel() { + return Objects.equals( + System.getProperty("spanner.gce.config.server_url"), + "https://staging-wrenchworks.sandbox.googleapis.com"); + } + @Override protected void before() throws Throwable { this.initializeConfig(); + assumeFalse(alwaysCreateNewInstance && isCloudDevel()); + this.config.setUp(); SpannerOptions options = config.spannerOptions(); @@ -133,7 +147,7 @@ protected void after() { private void initializeInstance(InstanceId instanceId) throws Exception { InstanceConfig instanceConfig; try { - instanceConfig = instanceAdminClient.getInstanceConfig("regional-us-central1"); + instanceConfig = instanceAdminClient.getInstanceConfig("regional-us-east4"); } catch (Throwable ignore) { instanceConfig = Iterators.get(instanceAdminClient.listInstanceConfigs().iterateAll().iterator(), 0, null); @@ -146,6 +160,7 @@ private void initializeInstance(InstanceId instanceId) throws Exception { InstanceInfo.newBuilder(instanceId) .setNodeCount(1) .setDisplayName("Test instance") + .setEdition(com.google.spanner.admin.instance.v1.Instance.Edition.ENTERPRISE_PLUS) .setInstanceConfigId(configId) .build(); OperationFuture op = diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTestWithClosedSessionsEnv.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTestWithClosedSessionsEnv.java index b71771ae2ca..7627ed54883 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTestWithClosedSessionsEnv.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/IntegrationTestWithClosedSessionsEnv.java @@ -47,7 +47,10 @@ private static class SpannerWithClosedSessionsImpl extends SpannerImpl { @Override DatabaseClientImpl createDatabaseClient( - String clientId, SessionPool pool, MultiplexedSessionDatabaseClient ignore) { 
+ String clientId, + SessionPool pool, + boolean useMultiplexedSessionBlindWriteIgnore, + MultiplexedSessionDatabaseClient ignore) { return new DatabaseClientWithClosedSessionImpl(clientId, pool, tracer); } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockSpannerServiceImpl.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockSpannerServiceImpl.java index 54b992b69ff..9f0a2822d87 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockSpannerServiceImpl.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MockSpannerServiceImpl.java @@ -578,6 +578,7 @@ private static void checkStreamException( private final Object lock = new Object(); private Deque requests = new ConcurrentLinkedDeque<>(); private volatile CountDownLatch freezeLock = new CountDownLatch(0); + private final AtomicInteger freezeAfterReturningNumRows = new AtomicInteger(); private Queue exceptions = new ConcurrentLinkedQueue<>(); private boolean stickyGlobalExceptions = false; private ConcurrentMap statementResults = new ConcurrentHashMap<>(); @@ -784,6 +785,10 @@ public void unfreeze() { freezeLock.countDown(); } + public void freezeAfterReturningNumRows(int numRows) { + freezeAfterReturningNumRows.set(numRows); + } + public void setMaxSessionsInOneBatch(int max) { this.maxNumSessionsInOneBatch = max; } @@ -808,7 +813,7 @@ public void batchCreateSessions( batchCreateSessionsExecutionTime.simulateExecutionTime( exceptions, stickyGlobalExceptions, freezeLock); if (sessions.size() >= maxTotalSessions) { - throw Status.RESOURCE_EXHAUSTED + throw Status.FAILED_PRECONDITION .withDescription("Maximum number of sessions reached") .asRuntimeException(); } @@ -1678,7 +1683,8 @@ private void returnPartialResultSet( ByteString transactionId, TransactionSelector transactionSelector, StreamObserver responseObserver, - SimulatedExecutionTime executionTime) { + SimulatedExecutionTime executionTime) + throws Exception { 
ResultSetMetadata metadata = resultSet.getMetadata(); if (transactionId == null) { Transaction transaction = getTemporaryTransactionOrNull(transactionSelector); @@ -1700,6 +1706,12 @@ private void returnPartialResultSet( SimulatedExecutionTime.checkStreamException( index, executionTime.exceptions, executionTime.streamIndices); responseObserver.onNext(iterator.next()); + if (freezeAfterReturningNumRows.get() > 0) { + if (freezeAfterReturningNumRows.decrementAndGet() == 0) { + freeze(); + freezeLock.await(); + } + } index++; } responseObserver.onCompleted(); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClientMockServerTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClientMockServerTest.java index bf4a02a10c5..b6dff424079 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClientMockServerTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClientMockServerTest.java @@ -16,6 +16,7 @@ package com.google.cloud.spanner; +import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; @@ -24,15 +25,21 @@ import static org.junit.Assert.assertTrue; import com.google.cloud.NoCredentials; +import com.google.cloud.Timestamp; import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Options.RpcPriority; import com.google.cloud.spanner.connection.RandomResultSetGenerator; import com.google.common.base.Stopwatch; import com.google.common.collect.ImmutableList; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.CommitRequest; import com.google.spanner.v1.ExecuteSqlRequest; +import 
com.google.spanner.v1.RequestOptions.Priority; import com.google.spanner.v1.Session; import io.grpc.Status; import java.time.Duration; +import java.util.Collections; import java.util.List; import java.util.Set; import java.util.UUID; @@ -63,6 +70,7 @@ public void createSpannerInstance() { .setSessionPoolOption( SessionPoolOptions.newBuilder() .setUseMultiplexedSession(true) + .setUseMultiplexedSessionBlindWrite(true) // Set the maintainer to loop once every 1ms .setMultiplexedSessionMaintenanceLoopFrequency(Duration.ofMillis(1L)) // Set multiplexed sessions to be replaced once every 1ms @@ -309,6 +317,156 @@ public void testMaintainerInvalidatesMultiplexedSessionClientIfUnimplemented() { assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); } + @Test + public void testWriteAtLeastOnceAborted() { + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + // Force the Commit RPC to return Aborted the first time it is called. The exception is cleared + // after the first call, so the retry should succeed. 
+ mockSpanner.setCommitExecutionTime( + SimulatedExecutionTime.ofException( + mockSpanner.createAbortedException(ByteString.copyFromUtf8("test")))); + Timestamp timestamp = + client.writeAtLeastOnce( + Collections.singletonList( + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build())); + assertNotNull(timestamp); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertEquals(2, commitRequests.size()); + for (CommitRequest request : commitRequests) { + assertTrue(mockSpanner.getSession(request.getSession()).getMultiplexed()); + } + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testWriteAtLeastOnce() { + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + Timestamp timestamp = + client.writeAtLeastOnce( + Collections.singletonList( + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build())); + assertNotNull(timestamp); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getSingleUseTransaction()); + assertTrue(commit.getSingleUseTransaction().hasReadWrite()); + assertFalse(commit.getSingleUseTransaction().getExcludeTxnFromChangeStreams()); + assertNotNull(commit.getRequestOptions()); + assertEquals(Priority.PRIORITY_UNSPECIFIED, commit.getRequestOptions().getPriority()); + assertTrue(mockSpanner.getSession(commit.getSession()).getMultiplexed()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, 
client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testWriteAtLeastOnceWithCommitStats() { + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + CommitResponse response = + client.writeAtLeastOnceWithOptions( + Collections.singletonList( + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build()), + Options.commitStats()); + assertNotNull(response); + assertNotNull(response.getCommitTimestamp()); + assertNotNull(response.getCommitStats()); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getSingleUseTransaction()); + assertTrue(commit.getSingleUseTransaction().hasReadWrite()); + assertFalse(commit.getSingleUseTransaction().getExcludeTxnFromChangeStreams()); + assertNotNull(commit.getRequestOptions()); + assertEquals(Priority.PRIORITY_UNSPECIFIED, commit.getRequestOptions().getPriority()); + assertTrue(mockSpanner.getSession(commit.getSession()).getMultiplexed()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testWriteAtLeastOnceWithOptions() { + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + client.writeAtLeastOnceWithOptions( + Collections.singletonList( + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build()), + Options.priority(RpcPriority.LOW)); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getSingleUseTransaction()); + 
assertTrue(commit.getSingleUseTransaction().hasReadWrite()); + assertFalse(commit.getSingleUseTransaction().getExcludeTxnFromChangeStreams()); + assertNotNull(commit.getRequestOptions()); + assertEquals(Priority.PRIORITY_LOW, commit.getRequestOptions().getPriority()); + assertTrue(mockSpanner.getSession(commit.getSession()).getMultiplexed()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testWriteAtLeastOnceWithTagOptions() { + DatabaseClientImpl client = + (DatabaseClientImpl) spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + client.writeAtLeastOnceWithOptions( + Collections.singletonList( + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build()), + Options.tag("app=spanner,env=test")); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getSingleUseTransaction()); + assertTrue(commit.getSingleUseTransaction().hasReadWrite()); + assertFalse(commit.getSingleUseTransaction().getExcludeTxnFromChangeStreams()); + assertNotNull(commit.getRequestOptions()); + assertThat(commit.getRequestOptions().getTransactionTag()).isEqualTo("app=spanner,env=test"); + assertThat(commit.getRequestOptions().getRequestTag()).isEmpty(); + assertTrue(mockSpanner.getSession(commit.getSession()).getMultiplexed()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + + @Test + public void testWriteAtLeastOnceWithExcludeTxnFromChangeStreams() { + DatabaseClientImpl client = + (DatabaseClientImpl) 
spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + client.writeAtLeastOnceWithOptions( + Collections.singletonList( + Mutation.newInsertBuilder("FOO").set("ID").to(1L).set("NAME").to("Bar").build()), + Options.excludeTxnFromChangeStreams()); + + List commitRequests = mockSpanner.getRequestsOfType(CommitRequest.class); + assertThat(commitRequests).hasSize(1); + CommitRequest commit = commitRequests.get(0); + assertNotNull(commit.getSingleUseTransaction()); + assertTrue(commit.getSingleUseTransaction().hasReadWrite()); + assertTrue(commit.getSingleUseTransaction().getExcludeTxnFromChangeStreams()); + assertTrue(mockSpanner.getSession(commit.getSession()).getMultiplexed()); + + assertNotNull(client.multiplexedSessionDatabaseClient); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsAcquired().get()); + assertEquals(1L, client.multiplexedSessionDatabaseClient.getNumSessionsReleased().get()); + } + private void waitForSessionToBeReplaced(DatabaseClientImpl client) { assertNotNull(client.multiplexedSessionDatabaseClient); SessionReference sessionReference = diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClientTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClientTest.java index d7d9b7395ed..287fdd0bd0b 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClientTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionDatabaseClientTest.java @@ -16,8 +16,10 @@ package com.google.cloud.spanner; +import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.junit.Assume.assumeFalse; import static org.junit.Assume.assumeTrue; @@ -27,6 +29,8 @@ import static 
org.mockito.Mockito.when; import com.google.cloud.spanner.SessionClient.SessionConsumer; +import java.io.PrintWriter; +import java.io.StringWriter; import java.lang.reflect.Field; import java.time.Clock; import java.time.Duration; @@ -110,10 +114,9 @@ public void testMaintainer() { } @Test - public void testForceDisableEnvVar() throws Exception { + public void testDisableMultiplexedSessionEnvVar() throws Exception { assumeTrue(isJava8() && !isWindows()); - assumeFalse( - System.getenv().containsKey("GOOGLE_CLOUD_SPANNER_FORCE_DISABLE_MULTIPLEXED_SESSIONS")); + assumeFalse(System.getenv().containsKey("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS")); // Assert that the mux sessions setting is respected by default. assertTrue( @@ -129,8 +132,7 @@ public void testForceDisableEnvVar() throws Exception { (Map) field.get(System.getenv()); try { - writeableEnvironmentVariables.put( - "GOOGLE_CLOUD_SPANNER_FORCE_DISABLE_MULTIPLEXED_SESSIONS", "true"); + writeableEnvironmentVariables.put("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS", "false"); // Assert that the env var overrides the mux sessions setting. assertFalse( SessionPoolOptions.newBuilder() @@ -138,8 +140,108 @@ public void testForceDisableEnvVar() throws Exception { .build() .getUseMultiplexedSession()); } finally { - writeableEnvironmentVariables.remove( - "GOOGLE_CLOUD_SPANNER_FORCE_DISABLE_MULTIPLEXED_SESSIONS"); + writeableEnvironmentVariables.remove("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS"); + } + } + + @Test + public void testEnableMultiplexedSessionEnvVar() throws Exception { + assumeTrue(isJava8() && !isWindows()); + assumeFalse(System.getenv().containsKey("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS")); + + // Assert that the mux sessions setting is respected by default. 
+ assertFalse( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .build() + .getUseMultiplexedSession()); + + Class classOfMap = System.getenv().getClass(); + Field field = classOfMap.getDeclaredField("m"); + field.setAccessible(true); + Map writeableEnvironmentVariables = + (Map) field.get(System.getenv()); + + try { + writeableEnvironmentVariables.put("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS", "true"); + // Assert that the env var overrides the mux sessions setting. + assertTrue( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .build() + .getUseMultiplexedSession()); + } finally { + writeableEnvironmentVariables.remove("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS"); + } + } + + @Test + public void testIgnoreMultiplexedSessionEnvVar() throws Exception { + assumeTrue(isJava8() && !isWindows()); + assumeFalse(System.getenv().containsKey("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS")); + + // Assert that the mux sessions setting is respected by default. + assertFalse( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .build() + .getUseMultiplexedSession()); + + Class classOfMap = System.getenv().getClass(); + Field field = classOfMap.getDeclaredField("m"); + field.setAccessible(true); + Map writeableEnvironmentVariables = + (Map) field.get(System.getenv()); + + try { + writeableEnvironmentVariables.put("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS", ""); + // Assert that the env var overrides the mux sessions setting. 
+ assertFalse( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .build() + .getUseMultiplexedSession()); + } finally { + writeableEnvironmentVariables.remove("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS"); + } + } + + @Test + public void testThrowExceptionMultiplexedSessionEnvVarInvalidValues() throws Exception { + assumeTrue(isJava8() && !isWindows()); + assumeFalse(System.getenv().containsKey("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS")); + + // Assert that the mux sessions setting is respected by default. + assertFalse( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .build() + .getUseMultiplexedSession()); + + Class classOfMap = System.getenv().getClass(); + Field field = classOfMap.getDeclaredField("m"); + field.setAccessible(true); + Map writeableEnvironmentVariables = + (Map) field.get(System.getenv()); + + try { + writeableEnvironmentVariables.put("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS", "test"); + + // setting an invalid GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS value throws error. 
+ IllegalArgumentException e = + assertThrows( + IllegalArgumentException.class, + () -> + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .build() + .getUseMultiplexedSession()); + StringWriter sw = new StringWriter(); + e.printStackTrace(new PrintWriter(sw)); + assertThat(sw.toString()) + .contains("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS should be either true or false"); + } finally { + writeableEnvironmentVariables.remove("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS"); } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionMaintainerTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionMaintainerTest.java deleted file mode 100644 index f596183507e..00000000000 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionMaintainerTest.java +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Copyright 2024 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.google.cloud.spanner; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; -import static org.mockito.MockitoAnnotations.initMocks; - -import com.google.cloud.Timestamp; -import com.google.cloud.spanner.SessionPool.CachedSession; -import com.google.cloud.spanner.SessionPool.MultiplexedSessionInitializationConsumer; -import com.google.cloud.spanner.SessionPool.MultiplexedSessionMaintainerConsumer; -import com.google.cloud.spanner.SessionPool.Position; -import com.google.cloud.spanner.SessionPool.SessionFutureWrapper; -import io.opencensus.trace.Tracing; -import io.opentelemetry.api.OpenTelemetry; -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.stream.Collectors; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.mockito.Mock; -import org.threeten.bp.Duration; -import org.threeten.bp.Instant; - -@RunWith(JUnit4.class) -public class MultiplexedSessionMaintainerTest extends BaseSessionPoolTest { - - private ExecutorService executor = Executors.newSingleThreadExecutor(); - private @Mock SpannerImpl client; - private @Mock SessionClient sessionClient; - private @Mock SpannerOptions spannerOptions; - private DatabaseId db = DatabaseId.of("projects/p/instances/i/databases/unused"); - private SessionPoolOptions options; - private FakeClock clock = new FakeClock(); - private List multiplexedSessionsRemoved = new ArrayList<>(); - - 
@BeforeClass - public static void checkUsesMultiplexedSessionPool() { - assumeTrue("Only run if the maintainer in the session pool is used", false); - } - - @Before - public void setUp() { - initMocks(this); - when(client.getOptions()).thenReturn(spannerOptions); - when(client.getSessionClient(db)).thenReturn(sessionClient); - when(sessionClient.getSpanner()).thenReturn(client); - when(spannerOptions.getNumChannels()).thenReturn(4); - when(spannerOptions.getDatabaseRole()).thenReturn("role"); - options = - SessionPoolOptions.newBuilder() - .setMinSessions(1) - .setMaxIdleSessions(1) - .setMaxSessions(5) - .setIncStep(1) - .setKeepAliveIntervalMinutes(2) - .setUseMultiplexedSession(true) - .setPoolMaintainerClock(clock) - .build(); - when(spannerOptions.getSessionPoolOptions()).thenReturn(options); - assumeTrue(options.getUseMultiplexedSession()); - multiplexedSessionsRemoved.clear(); - } - - @Test - public void testMaintainMultiplexedSession_whenNewSessionCreated_assertThatStaleSessionIsRemoved() - throws Exception { - doAnswer( - invocation -> { - MultiplexedSessionInitializationConsumer consumer = - invocation.getArgument(0, MultiplexedSessionInitializationConsumer.class); - ReadContext mockContext = mock(ReadContext.class); - Timestamp timestamp = - Timestamp.ofTimeSecondsAndNanos( - Instant.ofEpochMilli(clock.currentTimeMillis.get()).getEpochSecond(), 0); - consumer.onSessionReady( - setupMockSession( - buildMockMultiplexedSession(client, mockContext, timestamp.toProto()), - mockContext)); - return null; - }) - .when(sessionClient) - .asyncCreateMultiplexedSession(any(MultiplexedSessionInitializationConsumer.class)); - doAnswer( - invocation -> { - MultiplexedSessionMaintainerConsumer consumer = - invocation.getArgument(0, MultiplexedSessionMaintainerConsumer.class); - ReadContext mockContext = mock(ReadContext.class); - Timestamp timestamp = - Timestamp.ofTimeSecondsAndNanos( - Instant.ofEpochMilli(clock.currentTimeMillis.get()).getEpochSecond(), 0); - 
consumer.onSessionReady( - setupMockSession( - buildMockMultiplexedSession(client, mockContext, timestamp.toProto()), - mockContext)); - return null; - }) - .when(sessionClient) - .asyncCreateMultiplexedSession(any(MultiplexedSessionMaintainerConsumer.class)); - - SessionPool pool = createPool(); - - // Run one maintenance loop. - CachedSession session1 = pool.getMultiplexedSessionWithFallback().get().get(); - runMaintenanceLoop(clock, pool, 1); - assertTrue(multiplexedSessionsRemoved.isEmpty()); - - // Advance clock by 8 days - clock.currentTimeMillis.addAndGet(Duration.ofDays(8).toMillis()); - - // Run second maintenance loop. the first session would now be stale since it has now existed - // for more than 7 days. - runMaintenanceLoop(clock, pool, 1); - - CachedSession session2 = pool.getMultiplexedSessionWithFallback().get().get(); - assertNotEquals(session1.getName(), session2.getName()); - assertEquals(1, multiplexedSessionsRemoved.size()); - assertTrue(getNameOfSessionRemoved().contains(session1.getName())); - - // Advance clock by 8 days - clock.currentTimeMillis.addAndGet(Duration.ofDays(8).toMillis()); - - // Run third maintenance loop. 
the second session would now be stale since it has now existed - // for more than 7 days - runMaintenanceLoop(clock, pool, 1); - - CachedSession session3 = pool.getMultiplexedSessionWithFallback().get().get(); - assertNotEquals(session2.getName(), session3.getName()); - assertEquals(2, multiplexedSessionsRemoved.size()); - assertTrue(getNameOfSessionRemoved().contains(session2.getName())); - } - - @Test - public void - testMaintainMultiplexedSession_whenMultiplexedSessionNotStale_assertThatSessionIsNotRemoved() { - doAnswer( - invocation -> { - MultiplexedSessionInitializationConsumer consumer = - invocation.getArgument(0, MultiplexedSessionInitializationConsumer.class); - ReadContext mockContext = mock(ReadContext.class); - Timestamp timestamp = - Timestamp.ofTimeSecondsAndNanos( - Instant.ofEpochMilli(clock.currentTimeMillis.get()).getEpochSecond(), 0); - consumer.onSessionReady( - setupMockSession( - buildMockMultiplexedSession(client, mockContext, timestamp.toProto()), - mockContext)); - return null; - }) - .when(sessionClient) - .asyncCreateMultiplexedSession(any(MultiplexedSessionInitializationConsumer.class)); - SessionPool pool = createPool(); - - // Run one maintenance loop. - SessionFutureWrapper session1 = pool.getMultiplexedSessionWithFallback(); - runMaintenanceLoop(clock, pool, 1); - assertTrue(multiplexedSessionsRemoved.isEmpty()); - - // Advance clock by 4 days - clock.currentTimeMillis.addAndGet(Duration.ofDays(4).toMillis()); - // Run one maintenance loop. the first session would not be stale yet since it has now existed - // for less than 7 days. 
- runMaintenanceLoop(clock, pool, 1); - SessionFutureWrapper session2 = pool.getMultiplexedSessionWithFallback(); - assertTrue(multiplexedSessionsRemoved.isEmpty()); - assertEquals(session1.get().getName(), session2.get().getName()); - } - - @Test - public void - testMaintainMultiplexedSession_whenMultiplexedSessionCreationFailed_testRetryAfterDelay() { - doAnswer( - invocation -> { - MultiplexedSessionInitializationConsumer consumer = - invocation.getArgument(0, MultiplexedSessionInitializationConsumer.class); - ReadContext mockContext = mock(ReadContext.class); - Timestamp timestamp = - Timestamp.ofTimeSecondsAndNanos( - Instant.ofEpochMilli(clock.currentTimeMillis.get()).getEpochSecond(), 0); - consumer.onSessionReady( - setupMockSession( - buildMockMultiplexedSession(client, mockContext, timestamp.toProto()), - mockContext)); - return null; - }) - .when(sessionClient) - .asyncCreateMultiplexedSession(any(MultiplexedSessionInitializationConsumer.class)); - doAnswer( - invocation -> { - MultiplexedSessionMaintainerConsumer consumer = - invocation.getArgument(0, MultiplexedSessionMaintainerConsumer.class); - consumer.onSessionCreateFailure( - SpannerExceptionFactory.newSpannerException(ErrorCode.DEADLINE_EXCEEDED, ""), 1); - return null; - }) - .when(sessionClient) - .asyncCreateMultiplexedSession(any(MultiplexedSessionMaintainerConsumer.class)); - SessionPool pool = createPool(); - - // Advance clock by 8 days - clock.currentTimeMillis.addAndGet(Duration.ofDays(8).toMillis()); - - // Run one maintenance loop. Attempt replacing stale session should fail. - SessionFutureWrapper session1 = pool.getMultiplexedSessionWithFallback(); - runMaintenanceLoop(clock, pool, 1); - assertTrue(multiplexedSessionsRemoved.isEmpty()); - verify(sessionClient, times(1)) - .asyncCreateMultiplexedSession(any(MultiplexedSessionMaintainerConsumer.class)); - - // Advance clock by 10s and now mock session creation to be successful. 
- clock.currentTimeMillis.addAndGet(Duration.ofSeconds(10).toMillis()); - doAnswer( - invocation -> { - MultiplexedSessionMaintainerConsumer consumer = - invocation.getArgument(0, MultiplexedSessionMaintainerConsumer.class); - ReadContext mockContext = mock(ReadContext.class); - Timestamp timestamp = - Timestamp.ofTimeSecondsAndNanos( - Instant.ofEpochMilli(clock.currentTimeMillis.get()).getEpochSecond(), 0); - consumer.onSessionReady( - setupMockSession( - buildMockMultiplexedSession(client, mockContext, timestamp.toProto()), - mockContext)); - return null; - }) - .when(sessionClient) - .asyncCreateMultiplexedSession(any(MultiplexedSessionMaintainerConsumer.class)); - // Run one maintenance loop. Attempt should be ignored as it has not been 10 minutes since last - // attempt. - runMaintenanceLoop(clock, pool, 1); - SessionFutureWrapper session2 = pool.getMultiplexedSessionWithFallback(); - assertTrue(multiplexedSessionsRemoved.isEmpty()); - assertEquals(session1.get().getName(), session2.get().getName()); - verify(sessionClient, times(1)) - .asyncCreateMultiplexedSession(any(MultiplexedSessionMaintainerConsumer.class)); - - // Advance clock by 15 minutes - clock.currentTimeMillis.addAndGet(Duration.ofMinutes(15).toMillis()); - // Run one maintenance loop. Attempt should succeed since its already more than 10 minutes since - // the last attempt. 
- runMaintenanceLoop(clock, pool, 1); - SessionFutureWrapper session3 = pool.getMultiplexedSessionWithFallback(); - assertTrue(getNameOfSessionRemoved().contains(session1.get().get().getName())); - assertNotEquals(session1.get().getName(), session3.get().getName()); - verify(sessionClient, times(2)) - .asyncCreateMultiplexedSession(any(MultiplexedSessionMaintainerConsumer.class)); - } - - private SessionImpl setupMockSession(final SessionImpl session, final ReadContext mockContext) { - final ResultSet mockResult = mock(ResultSet.class); - when(mockContext.executeQuery(any(Statement.class))).thenAnswer(invocation -> mockResult); - when(mockResult.next()).thenReturn(true); - return session; - } - - private SessionPool createPool() { - // Allow sessions to be added to the head of the pool in all cases in this test, as it is - // otherwise impossible to know which session exactly is getting pinged at what point in time. - SessionPool pool = - SessionPool.createPool( - options, - new TestExecutorFactory(), - client.getSessionClient(db), - clock, - Position.FIRST, - new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer("")), - OpenTelemetry.noop()); - pool.multiplexedSessionRemovedListener = - input -> { - multiplexedSessionsRemoved.add(input); - return null; - }; - return pool; - } - - Set getNameOfSessionRemoved() { - return multiplexedSessionsRemoved.stream() - .map(session -> session.getName()) - .collect(Collectors.toSet()); - } -} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionPoolTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionPoolTest.java deleted file mode 100644 index ed9926dea88..00000000000 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/MultiplexedSessionPoolTest.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Copyright 2024 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except 
in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.spanner; - -import static com.google.common.truth.Truth.assertThat; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; -import static org.mockito.MockitoAnnotations.initMocks; - -import com.google.cloud.spanner.SessionPool.MultiplexedSessionFuture; -import com.google.cloud.spanner.SessionPool.MultiplexedSessionInitializationConsumer; -import com.google.cloud.spanner.SessionPool.SessionFutureWrapper; -import com.google.cloud.spanner.SpannerImpl.ClosedException; -import io.opencensus.trace.Tracing; -import io.opentelemetry.api.OpenTelemetry; -import java.io.PrintWriter; -import java.io.StringWriter; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.mockito.Mock; -import org.threeten.bp.Duration; - -/** - * Tests for {@link com.google.cloud.spanner.SessionPool.MultiplexedSession} component within the - * {@link SessionPool} class. 
- */ -public class MultiplexedSessionPoolTest extends BaseSessionPoolTest { - - @Mock SpannerImpl client; - @Mock SessionClient sessionClient; - @Mock SpannerOptions spannerOptions; - private final DatabaseId db = DatabaseId.of("projects/p/instances/i/databases/unused"); - private final TraceWrapper tracer = - new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer("")); - SessionPoolOptions options; - SessionPool pool; - - private SessionPool createPool() { - return SessionPool.createPool( - options, - new TestExecutorFactory(), - client.getSessionClient(db), - tracer, - OpenTelemetry.noop()); - } - - @BeforeClass - public static void checkUsesMultiplexedSessionPool() { - assumeTrue("Only run if the maintainer in the session pool is used", false); - } - - @Before - public void setUp() { - initMocks(this); - SpannerOptions.resetActiveTracingFramework(); - SpannerOptions.enableOpenTelemetryTraces(); - when(client.getOptions()).thenReturn(spannerOptions); - when(client.getSessionClient(db)).thenReturn(sessionClient); - when(sessionClient.getSpanner()).thenReturn(client); - when(spannerOptions.getNumChannels()).thenReturn(4); - when(spannerOptions.getDatabaseRole()).thenReturn("role"); - options = - SessionPoolOptions.newBuilder() - .setMinSessions(2) - .setMaxSessions(2) - .setUseMultiplexedSession(true) - .build(); - when(spannerOptions.getSessionPoolOptions()).thenReturn(options); - assumeTrue(options.getUseMultiplexedSession()); - } - - @Test - public void testGetMultiplexedSession_whenSessionInitializationSucceeded_assertSessionReturned() { - setupMockMultiplexedSessionCreation(); - - pool = createPool(); - assertTrue(pool.isValid()); - - // create 5 requests which require a session - for (int i = 0; i < 5; i++) { - // checking out a multiplexed session - SessionFutureWrapper multiplexedSessionFuture = pool.getMultiplexedSessionWithFallback(); - assertNotNull(multiplexedSessionFuture.get()); - } - verify(sessionClient, times(1)) - 
.asyncCreateMultiplexedSession(any(MultiplexedSessionInitializationConsumer.class)); - } - - @Test - public void testGetMultiplexedSession_whenClosedPool_assertSessionReturned() { - setupMockMultiplexedSessionCreation(); - - pool = createPool(); - assertTrue(pool.isValid()); - closePoolWithStacktrace(); - - // checking out a multiplexed session does not throw error even if pool is closed - MultiplexedSessionFuture multiplexedSessionFuture = - (MultiplexedSessionFuture) pool.getMultiplexedSessionWithFallback().get(); - assertNotNull(multiplexedSessionFuture); - - // checking out a regular session throws error. - IllegalStateException e = assertThrows(IllegalStateException.class, () -> pool.getSession()); - assertThat(e.getCause()).isInstanceOf(ClosedException.class); - StringWriter sw = new StringWriter(); - e.getCause().printStackTrace(new PrintWriter(sw)); - assertThat(sw.toString()).contains("closePoolWithStacktrace"); - } - - private void closePoolWithStacktrace() { - pool.closeAsync(new SpannerImpl.ClosedException()); - } - - @Test - public void testGetMultiplexedSession_whenSessionCreationFailed_assertErrorForWaiters() { - doAnswer( - invocation -> { - MultiplexedSessionInitializationConsumer consumer = - invocation.getArgument(0, MultiplexedSessionInitializationConsumer.class); - consumer.onSessionCreateFailure( - SpannerExceptionFactory.newSpannerException(ErrorCode.DEADLINE_EXCEEDED, ""), 1); - return null; - }) - .when(sessionClient) - .asyncCreateMultiplexedSession(any(MultiplexedSessionInitializationConsumer.class)); - options = - options - .toBuilder() - .setMinSessions(2) - .setUseMultiplexedSession(true) - .setAcquireSessionTimeout( - Duration.ofMillis(50)) // block for a max of 50 ms for session to be available - .build(); - pool = createPool(); - - // create 5 requests which require a session - for (int i = 0; i < 5; i++) { - SpannerException e = - assertThrows( - SpannerException.class, () -> pool.getMultiplexedSessionWithFallback().get().get()); - 
assertEquals(ErrorCode.DEADLINE_EXCEEDED, e.getErrorCode()); - } - // assert that all 5 requests failed with exception - assertEquals(0, pool.getNumWaiterTimeouts()); - assertEquals(0, pool.getNumberOfSessionsInPool()); - } - - private void setupMockMultiplexedSessionCreation() { - doAnswer( - invocation -> { - MultiplexedSessionInitializationConsumer consumer = - invocation.getArgument(0, MultiplexedSessionInitializationConsumer.class); - consumer.onSessionReady(mockSession()); - return null; - }) - .when(sessionClient) - .asyncCreateMultiplexedSession(any(MultiplexedSessionInitializationConsumer.class)); - } -} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenCensusApiTracerTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenCensusApiTracerTest.java new file mode 100644 index 00000000000..5e7a58cdb23 --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenCensusApiTracerTest.java @@ -0,0 +1,427 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.FailOnOverkillTraceComponentImpl.TestSpan; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.SpannerOptions.SpannerEnvironment; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import io.grpc.Status; +import io.opencensus.trace.Status.CanonicalCode; +import io.opencensus.trace.Tracing; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.trace.data.SpanData; +import java.lang.reflect.Modifier; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Assume; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.threeten.bp.Duration; + +@Category(TracerTest.class) +@RunWith(JUnit4.class) +@Ignore("OpenCensus is too intrusive and affects other tests, so this test is by default disabled") +public class OpenCensusApiTracerTest extends 
AbstractMockServerTest { + private static final Statement SELECT_RANDOM = Statement.of("SELECT * FROM random"); + + private static final Statement UPDATE_RANDOM = Statement.of("UPDATE random SET foo=1 WHERE id=1"); + private static final FailOnOverkillTraceComponentImpl failOnOverkillTraceComponent = + new FailOnOverkillTraceComponentImpl(); + + private DatabaseClient client; + + @BeforeClass + public static void setupOpenTelemetry() throws Exception { + Assume.assumeTrue( + "This test is only supported on JDK11 and lower", + JavaVersionUtil.getJavaMajorVersion() < 12); + + SpannerOptions.resetActiveTracingFramework(); + SpannerOptions.enableOpenCensusTraces(); + + // Use a little reflection to set the test tracer. + // This is not possible in Java 12 and later. + java.lang.reflect.Field field = Tracing.class.getDeclaredField("traceComponent"); + field.setAccessible(true); + java.lang.reflect.Field modifiersField = null; + try { + modifiersField = java.lang.reflect.Field.class.getDeclaredField("modifiers"); + } catch (NoSuchFieldException e) { + // Halt the test and ignore it. + Assume.assumeTrue( + "Skipping test as reflection is not allowed on reflection class in this JDK build", + false); + } + modifiersField.setAccessible(true); + // Remove the final modifier from the 'traceComponent' field. 
+ modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); + field.set(null, failOnOverkillTraceComponent); + } + + @BeforeClass + public static void setupResults() { + RandomResultSetGenerator generator = new RandomResultSetGenerator(1); + mockSpanner.putStatementResult(StatementResult.query(SELECT_RANDOM, generator.generate())); + mockSpanner.putStatementResults(StatementResult.update(UPDATE_RANDOM, 1L)); + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + failOnOverkillTraceComponent.clearSpans(); + failOnOverkillTraceComponent.clearAnnotations(); + } + + @Override + public void createSpannerInstance() { + SpannerOptions.Builder builder = SpannerOptions.newBuilder(); + // Set a quick polling algorithm to prevent this from slowing down the test unnecessarily. + builder + .getDatabaseAdminStubSettingsBuilder() + .updateDatabaseDdlOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofNanos(1L)) + .setMaxRetryDelay(Duration.ofNanos(1L)) + .setRetryDelayMultiplier(1.0) + .setTotalTimeout(Duration.ofMinutes(10L)) + .build())); + spanner = + builder + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessions(Duration.ofSeconds(5L)) + .setFailOnSessionLeak() + .build()) + .setEnableApiTracing(true) + .build() + .getService(); + client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + } + + @Test + public void testSingleUseQuery() { + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + Map spans = failOnOverkillTraceComponent.getSpans(); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", spans); + 
assertContains("Spanner.ExecuteStreamingSql", spans); + } + + @Test + public void testExecuteUpdate() { + assertNotNull( + client.readWriteTransaction().run(transaction -> transaction.executeUpdate(UPDATE_RANDOM))); + + Map spans = failOnOverkillTraceComponent.getSpans(); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteUpdate", spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertContains("Spanner.ExecuteSql", spans); + assertContains("Spanner.Commit", spans); + } + + @Test + public void testBatchUpdate() { + assertNotNull( + client + .readWriteTransaction() + .run( + transaction -> + transaction.batchUpdate(ImmutableList.of(UPDATE_RANDOM, UPDATE_RANDOM)))); + + Map spans = failOnOverkillTraceComponent.getSpans(); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains("CloudSpannerOperation.BatchUpdate", spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertContains("Spanner.ExecuteBatchDml", spans); + assertContains("Spanner.Commit", spans); + } + + @Test + public void testMultiUseReadOnlyQuery() { + try (ReadOnlyTransaction readOnlyTransaction = client.readOnlyTransaction()) { + try (ResultSet resultSet = readOnlyTransaction.executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + } + + Map spans = failOnOverkillTraceComponent.getSpans(); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertContains("Spanner.ExecuteStreamingSql", spans); + } + + @Test + public void testReadWriteTransactionQuery() { + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet resultSet = transaction.executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + return null; + }); + + Map spans = failOnOverkillTraceComponent.getSpans(); + 
assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertContains("CloudSpannerOperation.Commit", spans); + } + + // TODO: Enable test when the problem with overkilling the span has been fixed. + @Ignore("The client.write method overkills the span") + @Test + public void testRetryUnaryRpc() { + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofException(Status.UNAVAILABLE.asRuntimeException())); + + // Execute a simple read/write transaction using only mutations. This will use the + // BeginTransaction RPC to start the transaction. That RPC will first return UNAVAILABLE, then + // be retried by Gax, and succeed. The retry should show up in the tracing. + client.write(ImmutableList.of(Mutation.newInsertBuilder("foo").set("bar").to(1L).build())); + + List spans = failOnOverkillTraceComponent.getTestSpans(); + TestSpan span = getSpan("Spanner.BeginTransaction", spans); + assertNotNull(span.getStatus()); + assertEquals(CanonicalCode.OK, span.getStatus().getCanonicalCode()); + } + + @Test + public void testRetryQuery() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(Status.UNAVAILABLE.asRuntimeException())); + + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + List spans = failOnOverkillTraceComponent.getTestSpans(); + // UNAVAILABLE errors for the ExecuteStreamingSql RPC is manually retried by the Spanner client + // library, and not by Gax. This means that we get two Gax spans, instead of one with a retry + // attempt. 
+ List executeStreamingSqlSpans = getSpans("Spanner.ExecuteStreamingSql", spans); + assertEquals(2, executeStreamingSqlSpans.size()); + TestSpan span1 = executeStreamingSqlSpans.get(0); + assertNull(span1.getStatus()); + TestSpan span2 = executeStreamingSqlSpans.get(1); + assertNull(span2.getStatus()); + } + + @Test + public void testLroSucceeded() throws Exception { + addUpdateDdlResponse(); + + OperationFuture operationFuture = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + "i", "d", ImmutableList.of("create table foo (id int64) primary key (id)"), null); + assertNull(operationFuture.get()); + + List spans = failOnOverkillTraceComponent.getTestSpans(); + TestSpan updateDatabaseDdl = getSpan("DatabaseAdmin.UpdateDatabaseDdl", spans); + assertNotNull(updateDatabaseDdl); + assertEquals(1, updateDatabaseDdl.getAnnotations().size()); + assertEquals("Attempt succeeded", updateDatabaseDdl.getAnnotations().get(0)); + + TestSpan updateDatabaseDdlOperation = + getSpan("DatabaseAdmin.UpdateDatabaseDdlOperation", spans); + assertTrue(updateDatabaseDdlOperation.getAnnotations().size() >= 2); + assertContainsEvent("Operation started", updateDatabaseDdlOperation.getAnnotations()); + if (updateDatabaseDdlOperation.getAnnotations().size() > 2) { + assertContainsEvent("Scheduling next poll", updateDatabaseDdlOperation.getAnnotations()); + } + assertContainsEvent("Polling completed", updateDatabaseDdlOperation.getAnnotations()); + + // Verify that there are two GetOperations calls for polling the lro. 
+ List polls = getSpans("Operations.GetOperation", spans); + assertEquals(2, polls.size()); + } + + @Test + public void testLroCreationFailed() { + mockDatabaseAdmin.addException(Status.INVALID_ARGUMENT.asRuntimeException()); + + OperationFuture operationFuture = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + "i", "d", ImmutableList.of("create table foo (id int64) primary key (id)"), null); + ExecutionException executionException = + assertThrows(ExecutionException.class, operationFuture::get); + assertEquals( + ErrorCode.INVALID_ARGUMENT, + SpannerExceptionFactory.asSpannerException(executionException.getCause()).getErrorCode()); + + List spans = failOnOverkillTraceComponent.getTestSpans(); + TestSpan updateDatabaseDdl = getSpan("DatabaseAdmin.UpdateDatabaseDdl", spans); + assertEquals(1, updateDatabaseDdl.getAnnotations().size()); + } + + @Test + public void testLroOperationFailed() { + addUpdateDdlError(); + + OperationFuture operationFuture = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + "i", "d", ImmutableList.of("create table foo (id int64) primary key (id)"), null); + ExecutionException executionException = + assertThrows(ExecutionException.class, operationFuture::get); + assertEquals( + ErrorCode.FAILED_PRECONDITION, + SpannerExceptionFactory.asSpannerException(executionException.getCause()).getErrorCode()); + + List spans = failOnOverkillTraceComponent.getTestSpans(); + // Creating the LRO succeeds. + TestSpan updateDatabaseDdl = getSpan("DatabaseAdmin.UpdateDatabaseDdl", spans); + assertEquals(1, updateDatabaseDdl.getAnnotations().size()); + assertEquals("Attempt succeeded", updateDatabaseDdl.getAnnotations().get(0)); + + // The LRO itself returns an error. 
+ TestSpan updateDatabaseDdlOperation = + getSpan("DatabaseAdmin.UpdateDatabaseDdlOperation", spans); + assertTrue(updateDatabaseDdlOperation.getAnnotations().size() >= 2); + assertContainsEvent("Operation started", updateDatabaseDdlOperation.getAnnotations()); + if (updateDatabaseDdlOperation.getAnnotations().size() > 2) { + assertContainsEvent("Starting poll attempt 0", updateDatabaseDdlOperation.getAnnotations()); + } + assertContainsEvent("Polling completed", updateDatabaseDdlOperation.getAnnotations()); + } + + @Test + public void testEnableWithEnvVar() { + SpannerOptions.useEnvironment( + new SpannerEnvironment() { + @Override + public boolean isEnableApiTracing() { + return true; + } + }); + // Create a Spanner instance without explicitly enabling API tracing. + Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessions(Duration.ofSeconds(5L)) + .setFailOnSessionLeak() + .build()) + .build() + .getService(); + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + Map spans = failOnOverkillTraceComponent.getSpans(); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertContains("Spanner.ExecuteStreamingSql", spans); + } + + void assertContains(String expected, Map spans) { + assertTrue( + "Expected " + spansToString(spans) + " to contain " + expected, + spans.keySet().stream().anyMatch(span -> span.equals(expected))); + } + + void assertContainsEvent(String expected, List events) { + assertTrue( + "Expected " + eventsToString(events) + " to contain " + expected, + events.stream().anyMatch(event -> 
event.equals(expected))); + } + + boolean equalsSpan(SpanData span, String name, Attributes attributes) { + if (!span.getName().equals(name)) { + return false; + } + for (Entry, Object> entry : attributes.asMap().entrySet()) { + if (!span.getAttributes().asMap().containsKey(entry.getKey())) { + return false; + } + if (!Objects.equals(entry.getValue(), span.getAttributes().get(entry.getKey()))) { + return false; + } + } + return true; + } + + TestSpan getSpan(String name, List spans) { + return spans.stream() + .filter(span -> span.getSpanName().equals(name)) + .findAny() + .orElseThrow(() -> new IllegalArgumentException("Span " + name + " not found")); + } + + List getSpans(String name, List spans) { + return spans.stream() + .filter(span -> Objects.equals(span.getSpanName(), name)) + .collect(Collectors.toList()); + } + + private String spansToString(Map spans) { + return spans.keySet().stream().collect(Collectors.joining("\n", "\n", "\n")); + } + + private String eventsToString(List events) { + return events.stream().collect(Collectors.joining("\n", "\n", "\n")); + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetryApiTracerTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetryApiTracerTest.java new file mode 100644 index 00000000000..123f0f486a7 --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetryApiTracerTest.java @@ -0,0 +1,514 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.SpannerOptions.SpannerEnvironment; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import io.grpc.Status; +import io.opentelemetry.api.GlobalOpenTelemetry; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.StatusCode; +import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; +import io.opentelemetry.context.propagation.ContextPropagators; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.testing.exporter.InMemorySpanExporter; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.data.EventData; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import java.util.List; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import org.junit.After; 
+import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.threeten.bp.Duration; + +@RunWith(JUnit4.class) +public class OpenTelemetryApiTracerTest extends AbstractMockServerTest { + private static final Statement SELECT_RANDOM = Statement.of("SELECT * FROM random"); + + private static final Statement UPDATE_RANDOM = Statement.of("UPDATE random SET foo=1 WHERE id=1"); + private static InMemorySpanExporter spanExporter; + + private static OpenTelemetrySdk openTelemetry; + + private DatabaseClient client; + + @BeforeClass + public static void setupOpenTelemetry() { + SpannerOptions.resetActiveTracingFramework(); + SpannerOptions.enableOpenTelemetryTraces(); + GlobalOpenTelemetry.resetForTest(); + + spanExporter = InMemorySpanExporter.create(); + + SdkTracerProvider tracerProvider = + SdkTracerProvider.builder() + .addSpanProcessor(SimpleSpanProcessor.create(spanExporter)) + .build(); + + openTelemetry = + OpenTelemetrySdk.builder() + .setPropagators(ContextPropagators.create(W3CTraceContextPropagator.getInstance())) + .setTracerProvider(tracerProvider) + .buildAndRegisterGlobal(); + } + + @BeforeClass + public static void setupResults() { + RandomResultSetGenerator generator = new RandomResultSetGenerator(1); + mockSpanner.putStatementResult(StatementResult.query(SELECT_RANDOM, generator.generate())); + mockSpanner.putStatementResults(StatementResult.update(UPDATE_RANDOM, 1L)); + } + + @AfterClass + public static void closeOpenTelemetry() { + if (openTelemetry != null) { + openTelemetry.close(); + } + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + spanExporter.reset(); + } + + @Override + public void createSpannerInstance() { + SpannerOptions.Builder builder = SpannerOptions.newBuilder(); + // Set a quick polling algorithm to prevent this from slowing down the test unnecessarily. 
+ builder + .getDatabaseAdminStubSettingsBuilder() + .updateDatabaseDdlOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofNanos(1L)) + .setMaxRetryDelay(Duration.ofNanos(1L)) + .setRetryDelayMultiplier(1.0) + .setTotalTimeout(Duration.ofMinutes(10L)) + .build())); + spanner = + builder + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessions(Duration.ofSeconds(5L)) + .setFailOnSessionLeak() + .build()) + .setEnableApiTracing(true) + .build() + .getService(); + client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + } + + @Test + public void testSingleUseQuery() { + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertContains("Spanner.ExecuteStreamingSql", spans); + assertParent( + "CloudSpanner.ReadOnlyTransaction", "CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertParent( + "CloudSpannerOperation.ExecuteStreamingQuery", "Spanner.ExecuteStreamingSql", spans); + } + + @Test + public void testExecuteUpdate() { + assertNotNull( + client.readWriteTransaction().run(transaction -> transaction.executeUpdate(UPDATE_RANDOM))); + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteUpdate", spans); + assertContains("CloudSpannerOperation.Commit", spans); + 
assertContains("Spanner.ExecuteSql", spans); + assertContains("Spanner.Commit", spans); + + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.ExecuteUpdate", spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + assertParent("CloudSpannerOperation.ExecuteUpdate", "Spanner.ExecuteSql", spans); + } + + @Test + public void testBatchUpdate() { + assertNotNull( + client + .readWriteTransaction() + .run( + transaction -> + transaction.batchUpdate(ImmutableList.of(UPDATE_RANDOM, UPDATE_RANDOM)))); + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains("CloudSpannerOperation.BatchUpdate", spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertContains("Spanner.ExecuteBatchDml", spans); + assertContains("Spanner.Commit", spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.BatchUpdate", spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + assertParent("CloudSpannerOperation.BatchUpdate", "Spanner.ExecuteBatchDml", spans); + assertParent("CloudSpannerOperation.Commit", "Spanner.Commit", spans); + } + + @Test + public void testMultiUseReadOnlyQuery() { + try (ReadOnlyTransaction readOnlyTransaction = client.readOnlyTransaction()) { + try (ResultSet resultSet = readOnlyTransaction.executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + } + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertContains("Spanner.ExecuteStreamingSql", spans); + assertParent( + "CloudSpanner.ReadOnlyTransaction", + 
"CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.empty(), + spans); + assertParent( + "CloudSpannerOperation.ExecuteStreamingQuery", + "Spanner.ExecuteStreamingSql", + Attributes.empty(), + spans); + } + + @Test + public void testReadWriteTransactionQuery() { + client + .readWriteTransaction() + .run( + transaction -> { + try (ResultSet resultSet = transaction.executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + return null; + }); + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertParent( + "CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + assertParent( + "CloudSpannerOperation.ExecuteStreamingQuery", "Spanner.ExecuteStreamingSql", spans); + } + + @Test + public void testRetryUnaryRpc() { + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofException(Status.UNAVAILABLE.asRuntimeException())); + + // Execute a simple read/write transaction using only mutations. This will use the + // BeginTransaction RPC to start the transaction. That RPC will first return UNAVAILABLE, then + // be retried by Gax, and succeed. The retry should show up in the tracing. 
+ client.write(ImmutableList.of(Mutation.newInsertBuilder("foo").set("bar").to(1L).build())); + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + SpanData span = getSpan("Spanner.BeginTransaction", spans); + assertEquals(StatusCode.OK, span.getStatus().getStatusCode()); + assertEquals(3, span.getTotalRecordedEvents()); + List events = span.getEvents(); + assertEquals("Attempt failed, scheduling next attempt", events.get(0).getName()); + assertEquals("Starting RPC retry 1", events.get(1).getName()); + assertEquals("Attempt succeeded", events.get(2).getName()); + } + + @Test + public void testRetryQuery() { + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(Status.UNAVAILABLE.asRuntimeException())); + + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + // UNAVAILABLE errors for the ExecuteStreamingSql RPC is manually retried by the Spanner client + // library, and not by Gax. This means that we get two Gax spans, instead of one with a retry + // attempt. 
+ List executeStreamingSqlSpans = + getSpans("Spanner.ExecuteStreamingSql", Attributes.empty(), spans); + assertEquals(2, executeStreamingSqlSpans.size()); + SpanData span1 = executeStreamingSqlSpans.get(0); + assertEquals(StatusCode.ERROR, span1.getStatus().getStatusCode()); + SpanData span2 = executeStreamingSqlSpans.get(1); + assertEquals(StatusCode.OK, span2.getStatus().getStatusCode()); + } + + @Test + public void testLroSucceeded() throws Exception { + addUpdateDdlResponse(); + + OperationFuture operationFuture = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + "i", "d", ImmutableList.of("create table foo (id int64) primary key (id)"), null); + assertNull(operationFuture.get()); + + // Wait until the last span has been exported, which can take a few microseconds, as it is + // added by a gRPC executor thread. + do { + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + } while (getSpans( + "DatabaseAdmin.UpdateDatabaseDdlOperation", + Attributes.empty(), + spanExporter.getFinishedSpanItems()) + .isEmpty() + || getSpans( + "Operations.GetOperation", + Attributes.empty(), + spanExporter.getFinishedSpanItems()) + .size() + < 2); + List spans = spanExporter.getFinishedSpanItems(); + + SpanData updateDatabaseDdl = getSpan("DatabaseAdmin.UpdateDatabaseDdl", spans); + assertEquals(1, updateDatabaseDdl.getTotalRecordedEvents()); + assertEquals("Attempt succeeded", updateDatabaseDdl.getEvents().get(0).getName()); + + SpanData updateDatabaseDdlOperation = + getSpan("DatabaseAdmin.UpdateDatabaseDdlOperation", spans); + assertTrue(updateDatabaseDdlOperation.getTotalRecordedEvents() >= 5); + assertContainsEvent("Operation started", updateDatabaseDdlOperation.getEvents()); + assertContainsEvent("Starting poll attempt 0", updateDatabaseDdlOperation.getEvents()); + assertContainsEvent("Scheduling next poll", updateDatabaseDdlOperation.getEvents()); + assertContainsEvent("Starting poll attempt 1", updateDatabaseDdlOperation.getEvents()); + 
assertContainsEvent("Polling completed", updateDatabaseDdlOperation.getEvents()); + + // Verify that there are two GetOperations calls for polling the lro. + List polls = getSpans("Operations.GetOperation", Attributes.empty(), spans); + assertEquals(2, polls.size()); + } + + @Test + public void testLroCreationFailed() { + mockDatabaseAdmin.addException(Status.INVALID_ARGUMENT.asRuntimeException()); + + OperationFuture operationFuture = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + "i", "d", ImmutableList.of("create table foo (id int64) primary key (id)"), null); + ExecutionException executionException = + assertThrows(ExecutionException.class, operationFuture::get); + assertEquals( + ErrorCode.INVALID_ARGUMENT, + SpannerExceptionFactory.asSpannerException(executionException.getCause()).getErrorCode()); + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + + SpanData updateDatabaseDdl = getSpan("DatabaseAdmin.UpdateDatabaseDdl", spans); + assertEquals(1, updateDatabaseDdl.getTotalRecordedEvents()); + assertEquals( + "Attempt failed, error not retryable", updateDatabaseDdl.getEvents().get(0).getName()); + assertEquals(StatusCode.ERROR, updateDatabaseDdl.getStatus().getStatusCode()); + } + + @Test + public void testLroOperationFailed() { + addUpdateDdlError(); + + OperationFuture operationFuture = + spanner + .getDatabaseAdminClient() + .updateDatabaseDdl( + "i", "d", ImmutableList.of("create table foo (id int64) primary key (id)"), null); + ExecutionException executionException = + assertThrows(ExecutionException.class, operationFuture::get); + assertEquals( + ErrorCode.FAILED_PRECONDITION, + SpannerExceptionFactory.asSpannerException(executionException.getCause()).getErrorCode()); + + // Wait until the last span has been exported, which can take a few microseconds, as it is + // added by a gRPC executor thread. 
+ do { + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + } while (getSpans( + "DatabaseAdmin.UpdateDatabaseDdlOperation", + Attributes.empty(), + spanExporter.getFinishedSpanItems()) + .isEmpty()); + List spans = spanExporter.getFinishedSpanItems(); + + // Creating the LRO succeeds. + SpanData updateDatabaseDdl = getSpan("DatabaseAdmin.UpdateDatabaseDdl", spans); + assertEquals(1, updateDatabaseDdl.getTotalRecordedEvents()); + assertEquals("Attempt succeeded", updateDatabaseDdl.getEvents().get(0).getName()); + assertEquals(StatusCode.OK, updateDatabaseDdl.getStatus().getStatusCode()); + + // The LRO itself returns an error. + SpanData updateDatabaseDdlOperation = + getSpan("DatabaseAdmin.UpdateDatabaseDdlOperation", spans); + assertEquals(3, updateDatabaseDdlOperation.getTotalRecordedEvents()); + assertContainsEvent("Operation started", updateDatabaseDdlOperation.getEvents()); + assertContainsEvent("Starting poll attempt 0", updateDatabaseDdlOperation.getEvents()); + assertContainsEvent("Polling completed", updateDatabaseDdlOperation.getEvents()); + assertEquals(StatusCode.ERROR, updateDatabaseDdlOperation.getStatus().getStatusCode()); + } + + @Test + public void testEnableWithEnvVar() { + SpannerOptions.useEnvironment( + new SpannerEnvironment() { + @Override + public boolean isEnableApiTracing() { + return true; + } + }); + // Create a Spanner instance without explicitly enabling API tracing. 
+ Spanner spanner = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessions(Duration.ofSeconds(5L)) + .setFailOnSessionLeak() + .build()) + .build() + .getService(); + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains("CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertContains("Spanner.ExecuteStreamingSql", spans); + assertParent( + "CloudSpanner.ReadOnlyTransaction", "CloudSpannerOperation.ExecuteStreamingQuery", spans); + assertParent( + "CloudSpannerOperation.ExecuteStreamingQuery", "Spanner.ExecuteStreamingSql", spans); + } + + void assertContains(String expected, List spans) { + assertTrue( + "Expected " + spansToString(spans) + " to contain " + expected, + spans.stream().anyMatch(span -> span.getName().equals(expected))); + } + + void assertContainsEvent(String expected, List events) { + assertTrue( + "Expected " + eventsToString(events) + " to contain " + expected, + events.stream().anyMatch(event -> event.getName().equals(expected))); + } + + boolean equalsSpan(SpanData span, String name, Attributes attributes) { + if (!span.getName().equals(name)) { + return false; + } + for (Entry, Object> entry : attributes.asMap().entrySet()) { + if (!span.getAttributes().asMap().containsKey(entry.getKey())) { + return false; + } + if (!Objects.equals(entry.getValue(), span.getAttributes().get(entry.getKey()))) { + return false; + } + } + return true; + } + + void assertParent(String expectedParent, String 
child, List spans) { + SpanData parentSpan = getSpan(expectedParent, spans); + SpanData childSpan = getSpan(child, spans); + assertEquals(parentSpan.getSpanId(), childSpan.getParentSpanId()); + } + + void assertParent( + String expectedParent, String child, Attributes attributes, List spans) { + SpanData parentSpan = getSpan(expectedParent, spans); + List childSpans = getSpans(child, attributes, spans); + for (SpanData childSpan : childSpans) { + assertEquals(parentSpan.getSpanId(), childSpan.getParentSpanId()); + } + } + + SpanData getSpan(String name, List spans) { + return spans.stream() + .filter(span -> span.getName().equals(name)) + .findAny() + .orElseThrow(() -> new IllegalArgumentException("Span " + name + " not found")); + } + + List getSpans(String name, Attributes attributes, List spans) { + return spans.stream() + .filter(span -> equalsSpan(span, name, attributes)) + .collect(Collectors.toList()); + } + + private String spansToString(List spans) { + return spans.stream().map(SpanData::getName).collect(Collectors.joining("\n", "\n", "\n")); + } + + private String eventsToString(List events) { + return events.stream().map(EventData::getName).collect(Collectors.joining("\n", "\n", "\n")); + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetryBuiltInMetricsTracerTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetryBuiltInMetricsTracerTest.java new file mode 100644 index 00000000000..d9586acc956 --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetryBuiltInMetricsTracerTest.java @@ -0,0 +1,293 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.tracing.ApiTracerFactory; +import com.google.api.gax.tracing.MetricsTracerFactory; +import com.google.api.gax.tracing.OpenTelemetryMetricsRecorder; +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.common.base.Stopwatch; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Range; +import io.grpc.Status; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; 
+import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.threeten.bp.Duration; + +@RunWith(JUnit4.class) +public class OpenTelemetryBuiltInMetricsTracerTest extends AbstractMockServerTest { + + private static final Statement SELECT_RANDOM = Statement.of("SELECT * FROM random"); + + private static final Statement UPDATE_RANDOM = Statement.of("UPDATE random SET foo=1 WHERE id=1"); + private static InMemoryMetricReader metricReader; + + private static OpenTelemetry openTelemetry; + + private static Map attributes; + + private static Attributes expectedBaseAttributes; + + private static final long MIN_LATENCY = 0; + + private DatabaseClient client; + + @BeforeClass + public static void setup() { + metricReader = InMemoryMetricReader.create(); + + BuiltInOpenTelemetryMetricsProvider provider = BuiltInOpenTelemetryMetricsProvider.INSTANCE; + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + + BuiltInMetricsConstant.getAllViews().forEach(meterProvider::registerView); + + String client_name = "spanner-java/"; + openTelemetry = OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + attributes = provider.createClientAttributes("test-project", client_name); + + expectedBaseAttributes = + Attributes.builder() + .put(BuiltInMetricsConstant.PROJECT_ID_KEY, "test-project") + .put(BuiltInMetricsConstant.INSTANCE_CONFIG_ID_KEY, "unknown") + .put( + BuiltInMetricsConstant.LOCATION_ID_KEY, + BuiltInOpenTelemetryMetricsProvider.detectClientLocation()) + .put(BuiltInMetricsConstant.CLIENT_NAME_KEY, client_name) + .put(BuiltInMetricsConstant.CLIENT_UID_KEY, attributes.get("client_uid")) + .put(BuiltInMetricsConstant.CLIENT_HASH_KEY, attributes.get("client_hash")) + .build(); + } + + @BeforeClass + 
public static void setupResults() { + RandomResultSetGenerator generator = new RandomResultSetGenerator(1); + mockSpanner.putStatementResult(StatementResult.query(SELECT_RANDOM, generator.generate())); + mockSpanner.putStatementResults(StatementResult.update(UPDATE_RANDOM, 1L)); + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Override + public void createSpannerInstance() { + SpannerOptions.Builder builder = SpannerOptions.newBuilder(); + + ApiTracerFactory metricsTracerFactory = + new MetricsTracerFactory( + new OpenTelemetryMetricsRecorder(openTelemetry, BuiltInMetricsConstant.METER_NAME), + attributes); + // Set a quick polling algorithm to prevent this from slowing down the test unnecessarily. + builder + .getDatabaseAdminStubSettingsBuilder() + .updateDatabaseDdlOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofNanos(1L)) + .setMaxRetryDelay(Duration.ofNanos(1L)) + .setRetryDelayMultiplier(1.0) + .setTotalTimeout(Duration.ofMinutes(10L)) + .build())); + spanner = + builder + .setProjectId("test-project") + .setChannelProvider(channelProvider) + .setCredentials(NoCredentials.getInstance()) + .setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessions(Duration.ofSeconds(5L)) + .setFailOnSessionLeak() + .build()) + // Setting this to false so that Spanner Options does not register Metrics Tracer + // factory again. 
+ .setEnableBuiltInMetrics(false) + .setApiTracerFactory(metricsTracerFactory) + .build() + .getService(); + client = spanner.getDatabaseClient(DatabaseId.of("test-project", "i", "d")); + } + + @Test + public void testMetricsSingleUseQuery() { + Stopwatch stopwatch = Stopwatch.createStarted(); + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT_RANDOM)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS); + Attributes expectedAttributes = + expectedBaseAttributes + .toBuilder() + .put(BuiltInMetricsConstant.STATUS_KEY, "OK") + .put(BuiltInMetricsConstant.METHOD_KEY, "Spanner.ExecuteStreamingSql") + .build(); + + MetricData operationLatencyMetricData = + getMetricData(metricReader, BuiltInMetricsConstant.OPERATION_LATENCIES_NAME); + long operationLatencyValue = getAggregatedValue(operationLatencyMetricData, expectedAttributes); + assertThat(operationLatencyValue).isIn(Range.closed(MIN_LATENCY, elapsed)); + + MetricData attemptLatencyMetricData = + getMetricData(metricReader, BuiltInMetricsConstant.ATTEMPT_LATENCIES_NAME); + long attemptLatencyValue = getAggregatedValue(attemptLatencyMetricData, expectedAttributes); + assertThat(attemptLatencyValue).isIn(Range.closed(MIN_LATENCY, elapsed)); + + MetricData operationCountMetricData = + getMetricData(metricReader, BuiltInMetricsConstant.OPERATION_COUNT_NAME); + assertThat(getAggregatedValue(operationCountMetricData, expectedAttributes)).isEqualTo(1); + + MetricData attemptCountMetricData = + getMetricData(metricReader, BuiltInMetricsConstant.ATTEMPT_COUNT_NAME); + assertThat(getAggregatedValue(attemptCountMetricData, expectedAttributes)).isEqualTo(1); + } + + @Test + public void testMetricsWithGaxRetryUnaryRpc() { + Stopwatch stopwatch = Stopwatch.createStarted(); + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofException(Status.UNAVAILABLE.asRuntimeException())); + + // Execute a simple read/write 
transaction using only mutations. This will use the + // BeginTransaction RPC to start the transaction. That RPC will first return UNAVAILABLE, then + // be retried by Gax, and succeed. The retry should show up in the tracing. + client.write(ImmutableList.of(Mutation.newInsertBuilder("foo").set("bar").to(1L).build())); + + stopwatch.elapsed(TimeUnit.MILLISECONDS); + + Attributes expectedAttributesBeginTransactionOK = + expectedBaseAttributes + .toBuilder() + .put(BuiltInMetricsConstant.STATUS_KEY, "OK") + .put(BuiltInMetricsConstant.METHOD_KEY, "Spanner.BeginTransaction") + .build(); + + Attributes expectedAttributesBeginTransactionFailed = + expectedBaseAttributes + .toBuilder() + .put(BuiltInMetricsConstant.STATUS_KEY, "UNAVAILABLE") + .put(BuiltInMetricsConstant.METHOD_KEY, "Spanner.BeginTransaction") + .build(); + + MetricData attemptCountMetricData = + getMetricData(metricReader, BuiltInMetricsConstant.ATTEMPT_COUNT_NAME); + assertThat(getAggregatedValue(attemptCountMetricData, expectedAttributesBeginTransactionOK)) + .isEqualTo(1); + // Attempt count should have a failed metric point for Begin Transaction. + assertThat(getAggregatedValue(attemptCountMetricData, expectedAttributesBeginTransactionFailed)) + .isEqualTo(1); + + MetricData operationCountMetricData = + getMetricData(metricReader, BuiltInMetricsConstant.OPERATION_COUNT_NAME); + assertThat(getAggregatedValue(operationCountMetricData, expectedAttributesBeginTransactionOK)) + .isEqualTo(1); + // Operation count should not have a failed metric point for Begin Transaction as overall + // operation is success.. 
+ assertThat( + getAggregatedValue(operationCountMetricData, expectedAttributesBeginTransactionFailed)) + .isEqualTo(0); + } + + private MetricData getMetricData(InMemoryMetricReader reader, String metricName) { + String fullMetricName = BuiltInMetricsConstant.METER_NAME + "/" + metricName; + Collection allMetricData = Collections.emptyList(); + + // Fetch the MetricData with retries + for (int attemptsLeft = 1000; attemptsLeft > 0; attemptsLeft--) { + allMetricData = reader.collectAllMetrics(); + List matchingMetadata = + allMetricData.stream() + .filter(md -> md.getName().equals(fullMetricName)) + .collect(Collectors.toList()); + assertWithMessage( + "Found multiple MetricData with the same name: %s, in: %s", + fullMetricName, matchingMetadata) + .that(matchingMetadata.size()) + .isAtMost(1); + + if (!matchingMetadata.isEmpty()) { + return matchingMetadata.get(0); + } + + try { + Thread.sleep(1); + } catch (InterruptedException interruptedException) { + Thread.currentThread().interrupt(); + throw new RuntimeException(interruptedException); + } + } + + assertTrue(String.format("MetricData is missing for metric {0}", fullMetricName), false); + return null; + } + + private long getAggregatedValue(MetricData metricData, Attributes attributes) { + switch (metricData.getType()) { + case HISTOGRAM: + Optional hd = + metricData.getHistogramData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()) + .stream() + .findFirst(); + return hd.isPresent() ? (long) hd.get().getSum() / hd.get().getCount() : 0; + case LONG_SUM: + Optional ld = + metricData.getLongSumData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()) + .stream() + .findFirst(); + return ld.isPresent() ? 
ld.get().getValue() : 0; + default: + return 0; + } + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetrySpanTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetrySpanTest.java index a351231f0e7..a2aeb887733 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetrySpanTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OpenTelemetrySpanTest.java @@ -17,7 +17,9 @@ package com.google.cloud.spanner; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; import com.google.api.gax.grpc.testing.LocalChannelProvider; import com.google.cloud.NoCredentials; @@ -25,6 +27,8 @@ import com.google.common.base.Stopwatch; import com.google.common.collect.ImmutableList; import com.google.protobuf.ListValue; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.ExecuteSqlRequest; import com.google.spanner.v1.ResultSetMetadata; import com.google.spanner.v1.StructType; import com.google.spanner.v1.StructType.Field; @@ -45,6 +49,7 @@ import java.lang.reflect.Modifier; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import org.junit.After; import org.junit.AfterClass; @@ -67,7 +72,7 @@ public class OpenTelemetrySpanTest { private static LocalChannelProvider channelProvider; private static MockSpannerServiceImpl mockSpanner; private Spanner spanner; - private DatabaseClient client; + private Spanner spannerWithApiTracing; private static Server server; private static InMemorySpanExporter spanExporter; @@ -236,13 +241,22 @@ public void setUp() throws Exception { .build()); spanner = builder.build().getService(); + spannerWithApiTracing = builder.setEnableApiTracing(true).build().getService(); + } + + DatabaseClient getClient() { + return 
spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + } - client = spanner.getDatabaseClient(DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); + DatabaseClient getClientWithApiTracing() { + return spannerWithApiTracing.getDatabaseClient( + DatabaseId.of(TEST_PROJECT, TEST_INSTANCE, TEST_DATABASE)); } @After public void tearDown() { spanner.close(); + spannerWithApiTracing.close(); mockSpanner.reset(); mockSpanner.removeAllExecutionTimes(); spanExporter.reset(); @@ -268,6 +282,7 @@ public void singleUse() { int expectedReadOnlyTransactionSingleUseEventsCount = expectedReadOnlyTransactionSingleUseEvents.size(); + DatabaseClient client = getClient(); try (ResultSet rs = client.singleUse().executeQuery(SELECT1)) { while (rs.next()) { // Just consume the result set. @@ -364,6 +379,7 @@ public void multiUse() { int expectedReadOnlyTransactionMultiUseEventsCount = expectedReadOnlyTransactionMultiUseEvents.size(); + DatabaseClient client = getClient(); try (ReadOnlyTransaction tx = client.readOnlyTransaction()) { try (ResultSet rs = tx.executeQuery(SELECT1)) { while (rs.next()) { @@ -424,16 +440,27 @@ public void transactionRunner() { ? ImmutableList.of( "CloudSpannerOperation.CreateMultiplexedSession", "CloudSpannerOperation.BatchCreateSessionsRequest", + "CloudSpannerOperation.ExecuteUpdate", "CloudSpannerOperation.Commit", "CloudSpannerOperation.BatchCreateSessions", "CloudSpanner.ReadWriteTransaction") : ImmutableList.of( "CloudSpannerOperation.BatchCreateSessionsRequest", + "CloudSpannerOperation.ExecuteUpdate", "CloudSpannerOperation.Commit", "CloudSpannerOperation.BatchCreateSessions", "CloudSpanner.ReadWriteTransaction"); + DatabaseClient client = getClient(); TransactionRunner runner = client.readWriteTransaction(); runner.run(transaction -> transaction.executeUpdate(UPDATE_STATEMENT)); + // Wait until the list of spans contains "CloudSpannerOperation.BatchCreateSessions", as this is + // an async operation. 
+ Stopwatch stopwatch = Stopwatch.createStarted(); + while (spanExporter.getFinishedSpanItems().stream() + .noneMatch(span -> span.getName().equals("CloudSpannerOperation.BatchCreateSessions")) + && stopwatch.elapsed(TimeUnit.MILLISECONDS) < 100) { + Thread.yield(); + } List actualSpanItems = new ArrayList<>(); spanExporter .getFinishedSpanItems() @@ -460,6 +487,7 @@ public void transactionRunner() { expectedBatchCreateSessionsEventsCount); break; case "CloudSpannerOperation.Commit": + case "CloudSpannerOperation.ExecuteUpdate": assertEquals(0, spanItem.getEvents().size()); break; case "CloudSpanner.ReadWriteTransaction": @@ -484,11 +512,14 @@ public void transactionRunnerWithError() { "CloudSpannerOperation.CreateMultiplexedSession", "CloudSpannerOperation.BatchCreateSessionsRequest", "CloudSpannerOperation.BatchCreateSessions", + "CloudSpannerOperation.ExecuteUpdate", "CloudSpanner.ReadWriteTransaction") : ImmutableList.of( "CloudSpannerOperation.BatchCreateSessionsRequest", "CloudSpannerOperation.BatchCreateSessions", + "CloudSpannerOperation.ExecuteUpdate", "CloudSpanner.ReadWriteTransaction"); + DatabaseClient client = getClient(); TransactionRunner runner = client.readWriteTransaction(); SpannerException e = assertThrows( @@ -527,6 +558,9 @@ public void transactionRunnerWithError() { expectedReadWriteTransactionErrorEvents, expectedReadWriteTransactionErrorEventsCount); break; + case "CloudSpannerOperation.ExecuteUpdate": + assertEquals(0, spanItem.getEvents().size()); + break; default: assert false; } @@ -538,20 +572,14 @@ public void transactionRunnerWithError() { @Test public void transactionRunnerWithFailedAndBeginTransaction() { List expectedReadWriteTransactionWithCommitAndBeginTransactionSpans = - isMultiplexedSessionsEnabled() - ? 
ImmutableList.of( - "CloudSpannerOperation.CreateMultiplexedSession", - "CloudSpannerOperation.BeginTransaction", - "CloudSpannerOperation.BatchCreateSessionsRequest", - "CloudSpannerOperation.Commit", - "CloudSpannerOperation.BatchCreateSessions", - "CloudSpanner.ReadWriteTransaction") - : ImmutableList.of( - "CloudSpannerOperation.BeginTransaction", - "CloudSpannerOperation.BatchCreateSessionsRequest", - "CloudSpannerOperation.Commit", - "CloudSpannerOperation.BatchCreateSessions", - "CloudSpanner.ReadWriteTransaction"); + ImmutableList.of( + "CloudSpannerOperation.BeginTransaction", + "CloudSpannerOperation.BatchCreateSessionsRequest", + "CloudSpannerOperation.ExecuteUpdate", + "CloudSpannerOperation.Commit", + "CloudSpannerOperation.BatchCreateSessions", + "CloudSpanner.ReadWriteTransaction"); + DatabaseClient client = getClient(); assertEquals( Long.valueOf(1L), client @@ -575,7 +603,7 @@ public void transactionRunnerWithFailedAndBeginTransaction() { Stopwatch stopwatch = Stopwatch.createStarted(); while (spanExporter.getFinishedSpanItems().size() < expectedReadWriteTransactionWithCommitAndBeginTransactionSpans.size() - && stopwatch.elapsed().compareTo(java.time.Duration.ofMillis(1000)) < 0) { + && stopwatch.elapsed(TimeUnit.MILLISECONDS) < 2000) { Thread.yield(); } @@ -584,7 +612,11 @@ public void transactionRunnerWithFailedAndBeginTransaction() { .getFinishedSpanItems() .forEach( spanItem -> { - actualSpanItems.add(spanItem.getName()); + // Ignore multiplexed sessions, as they are not used by this test and can therefore + // best be ignored, as it is not 100% certain that it has already been created. 
+ if (!"CloudSpannerOperation.CreateMultiplexedSession".equals(spanItem.getName())) { + actualSpanItems.add(spanItem.getName()); + } switch (spanItem.getName()) { case "CloudSpannerOperation.CreateMultiplexedSession": verifyRequestEvents( @@ -606,6 +638,7 @@ public void transactionRunnerWithFailedAndBeginTransaction() { break; case "CloudSpannerOperation.Commit": case "CloudSpannerOperation.BeginTransaction": + case "CloudSpannerOperation.ExecuteUpdate": assertEquals(0, spanItem.getEvents().size()); break; case "CloudSpanner.ReadWriteTransaction": @@ -622,6 +655,167 @@ public void transactionRunnerWithFailedAndBeginTransaction() { verifySpans(actualSpanItems, expectedReadWriteTransactionWithCommitAndBeginTransactionSpans); } + @Test + public void testTransactionRunnerWithRetryOnBeginTransaction() { + // First get the client to ensure that the BatchCreateSessions request has been executed. + DatabaseClient clientWithApiTracing = getClientWithApiTracing(); + + // Register an UNAVAILABLE error on the server. This error will be returned the first time the + // BeginTransaction RPC is called. This RPC is then retried, and the transaction succeeds. + // The retry should be added as an event to the span. + mockSpanner.addException(Status.UNAVAILABLE.asRuntimeException()); + + clientWithApiTracing + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer(Mutation.newInsertBuilder("foo").set("id").to(1L).build()); + return null; + }); + + assertEquals(2, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + int numExpectedSpans = isMultiplexedSessionsEnabled() ? 
10 : 8; + waitForFinishedSpans(numExpectedSpans); + List finishedSpans = spanExporter.getFinishedSpanItems(); + List finishedSpanNames = + finishedSpans.stream().map(SpanData::getName).collect(Collectors.toList()); + String actualSpanNames = + finishedSpans.stream().map(SpanData::getName).collect(Collectors.joining("\n", "\n", "\n")); + assertEquals(actualSpanNames, numExpectedSpans, finishedSpans.size()); + + assertTrue(actualSpanNames, finishedSpanNames.contains("CloudSpanner.ReadWriteTransaction")); + assertTrue( + actualSpanNames, finishedSpanNames.contains("CloudSpannerOperation.BeginTransaction")); + assertTrue(actualSpanNames, finishedSpanNames.contains("CloudSpannerOperation.Commit")); + assertTrue( + actualSpanNames, finishedSpanNames.contains("CloudSpannerOperation.BatchCreateSessions")); + assertTrue( + actualSpanNames, + finishedSpanNames.contains("CloudSpannerOperation.BatchCreateSessionsRequest")); + + assertTrue(actualSpanNames, finishedSpanNames.contains("Spanner.BatchCreateSessions")); + assertTrue(actualSpanNames, finishedSpanNames.contains("Spanner.BeginTransaction")); + assertTrue(actualSpanNames, finishedSpanNames.contains("Spanner.Commit")); + + SpanData beginTransactionSpan = + finishedSpans.stream() + .filter(span -> span.getName().equals("Spanner.BeginTransaction")) + .findAny() + .orElseThrow(IllegalStateException::new); + assertTrue( + beginTransactionSpan.toString(), + beginTransactionSpan.getEvents().stream() + .anyMatch(event -> event.getName().equals("Starting RPC retry 1"))); + } + + @Test + public void testSingleUseRetryOnExecuteStreamingSql() { + // First get the client to ensure that the BatchCreateSessions request has been executed. + DatabaseClient clientWithApiTracing = getClientWithApiTracing(); + + // Register an UNAVAILABLE error on the server. This error will be returned the first time the + // BeginTransaction RPC is called. This RPC is then retried, and the transaction succeeds. 
+ // The retry should be added as an event to the span. + mockSpanner.addException(Status.UNAVAILABLE.asRuntimeException()); + + try (ResultSet resultSet = clientWithApiTracing.singleUse().executeQuery(SELECT1)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + int numExpectedSpans = isMultiplexedSessionsEnabled() ? 9 : 7; + waitForFinishedSpans(numExpectedSpans); + List finishedSpans = spanExporter.getFinishedSpanItems(); + List finishedSpanNames = + finishedSpans.stream().map(SpanData::getName).collect(Collectors.toList()); + String actualSpanNames = + finishedSpans.stream().map(SpanData::getName).collect(Collectors.joining("\n", "\n", "\n")); + assertEquals(actualSpanNames, numExpectedSpans, finishedSpans.size()); + + assertTrue(actualSpanNames, finishedSpanNames.contains("CloudSpanner.ReadOnlyTransaction")); + assertTrue( + actualSpanNames, finishedSpanNames.contains("CloudSpannerOperation.ExecuteStreamingQuery")); + assertTrue( + actualSpanNames, finishedSpanNames.contains("CloudSpannerOperation.BatchCreateSessions")); + assertTrue( + actualSpanNames, + finishedSpanNames.contains("CloudSpannerOperation.BatchCreateSessionsRequest")); + + assertTrue(actualSpanNames, finishedSpanNames.contains("Spanner.BatchCreateSessions")); + assertTrue(actualSpanNames, finishedSpanNames.contains("Spanner.ExecuteStreamingSql")); + + // UNAVAILABLE errors on ExecuteStreamingSql are handled manually in the client library, which + // means that the retry event is on this span. + SpanData executeStreamingQuery = + finishedSpans.stream() + .filter(span -> span.getName().equals("CloudSpannerOperation.ExecuteStreamingQuery")) + .findAny() + .orElseThrow(IllegalStateException::new); + assertTrue( + executeStreamingQuery.toString(), + executeStreamingQuery.getEvents().stream() + .anyMatch(event -> event.getName().contains("Stream broken. 
Safe to retry"))); + } + + @Test + public void testRetryOnExecuteSql() { + // First get the client to ensure that the BatchCreateSessions request has been executed. + DatabaseClient clientWithApiTracing = getClientWithApiTracing(); + + // Register an UNAVAILABLE error on the server. This error will be returned the first time the + // ExecuteSql RPC is called. This RPC is then retried, and the statement succeeds. + // The retry should be added as an event to the span. + mockSpanner.addException(Status.UNAVAILABLE.asRuntimeException()); + + clientWithApiTracing + .readWriteTransaction() + .run(transaction -> transaction.executeUpdate(UPDATE_STATEMENT)); + + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + int numExpectedSpans = isMultiplexedSessionsEnabled() ? 10 : 8; + waitForFinishedSpans(numExpectedSpans); + List finishedSpans = spanExporter.getFinishedSpanItems(); + List finishedSpanNames = + finishedSpans.stream().map(SpanData::getName).collect(Collectors.toList()); + String actualSpanNames = + finishedSpans.stream().map(SpanData::getName).collect(Collectors.joining("\n", "\n", "\n")); + assertEquals(actualSpanNames, numExpectedSpans, finishedSpans.size()); + + assertTrue(actualSpanNames, finishedSpanNames.contains("CloudSpanner.ReadWriteTransaction")); + assertTrue(actualSpanNames, finishedSpanNames.contains("CloudSpannerOperation.Commit")); + assertTrue( + actualSpanNames, finishedSpanNames.contains("CloudSpannerOperation.BatchCreateSessions")); + assertTrue( + actualSpanNames, + finishedSpanNames.contains("CloudSpannerOperation.BatchCreateSessionsRequest")); + + assertTrue(actualSpanNames, finishedSpanNames.contains("Spanner.BatchCreateSessions")); + assertTrue(actualSpanNames, finishedSpanNames.contains("Spanner.ExecuteSql")); + assertTrue(actualSpanNames, finishedSpanNames.contains("Spanner.Commit")); + + SpanData executeSqlSpan = + finishedSpans.stream() + .filter(span -> span.getName().equals("Spanner.ExecuteSql")) + .findAny() 
+ .orElseThrow(IllegalStateException::new); + assertTrue( + executeSqlSpan.toString(), + executeSqlSpan.getEvents().stream() + .anyMatch(event -> event.getName().equals("Starting RPC retry 1"))); + } + + private void waitForFinishedSpans(int numExpectedSpans) { + // Wait for all spans to finish. Failing to do so can cause the test to miss the + // BatchCreateSessions span, as that span is executed asynchronously in the SessionClient, and + // the SessionClient returns the session to the pool before the span has finished fully. + Stopwatch stopwatch = Stopwatch.createStarted(); + while (spanExporter.getFinishedSpanItems().size() < numExpectedSpans + && stopwatch.elapsed().compareTo(java.time.Duration.ofMillis(1000)) < 0) { + Thread.yield(); + } + } + private void verifyRequestEvents(SpanData spanItem, List expectedEvents, int eventCount) { List eventNames = spanItem.getEvents().stream().map(EventData::getName).collect(Collectors.toList()); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OptionsTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OptionsTest.java index 8c9a5d957e8..38b7a121731 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OptionsTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/OptionsTest.java @@ -18,16 +18,19 @@ import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; +import com.google.cloud.spanner.Options.RpcOrderBy; import com.google.cloud.spanner.Options.RpcPriority; import com.google.spanner.v1.DirectedReadOptions; import com.google.spanner.v1.DirectedReadOptions.IncludeReplicas; import 
com.google.spanner.v1.DirectedReadOptions.ReplicaSelection; +import com.google.spanner.v1.ReadRequest.OrderBy; import com.google.spanner.v1.RequestOptions.Priority; import org.junit.Test; import org.junit.runner.RunWith; @@ -79,7 +82,8 @@ public void allOptionsPresent() { Options.limit(10), Options.prefetchChunks(1), Options.dataBoostEnabled(true), - Options.directedRead(DIRECTED_READ_OPTIONS)); + Options.directedRead(DIRECTED_READ_OPTIONS), + Options.orderBy(RpcOrderBy.NO_ORDER)); assertThat(options.hasLimit()).isTrue(); assertThat(options.limit()).isEqualTo(10); assertThat(options.hasPrefetchChunks()).isTrue(); @@ -87,6 +91,7 @@ public void allOptionsPresent() { assertThat(options.hasDataBoostEnabled()).isTrue(); assertTrue(options.dataBoostEnabled()); assertTrue(options.hasDirectedReadOptions()); + assertTrue(options.hasOrderBy()); assertEquals(DIRECTED_READ_OPTIONS, options.directedReadOptions()); } @@ -101,6 +106,7 @@ public void allOptionsAbsent() { assertThat(options.hasTag()).isFalse(); assertThat(options.hasDataBoostEnabled()).isFalse(); assertThat(options.hasDirectedReadOptions()).isFalse(); + assertThat(options.hasOrderBy()).isFalse(); assertNull(options.withExcludeTxnFromChangeStreams()); assertThat(options.toString()).isEqualTo(""); assertThat(options.equals(options)).isTrue(); @@ -182,7 +188,8 @@ public void readOptionsTest() { Options.limit(limit), Options.tag(tag), Options.dataBoostEnabled(true), - Options.directedRead(DIRECTED_READ_OPTIONS)); + Options.directedRead(DIRECTED_READ_OPTIONS), + Options.orderBy(RpcOrderBy.NO_ORDER)); assertThat(options.toString()) .isEqualTo( @@ -197,10 +204,14 @@ public void readOptionsTest() { + " " + "directedReadOptions: " + DIRECTED_READ_OPTIONS + + " " + + "orderBy: " + + RpcOrderBy.NO_ORDER + " "); assertThat(options.tag()).isEqualTo(tag); assertEquals(dataBoost, options.dataBoostEnabled()); assertEquals(DIRECTED_READ_OPTIONS, options.directedReadOptions()); + assertEquals(OrderBy.ORDER_BY_NO_ORDER, 
options.orderBy()); } @Test @@ -354,6 +365,24 @@ public void testTransactionOptionsPriority() { assertEquals("priority: " + priority + " ", options.toString()); } + @Test + public void testReadOptionsOrderBy() { + RpcOrderBy orderBy = RpcOrderBy.NO_ORDER; + Options options = Options.fromReadOptions(Options.orderBy(orderBy)); + assertTrue(options.hasOrderBy()); + assertEquals("orderBy: " + orderBy + " ", options.toString()); + } + + @Test + public void testReadOptionsWithOrderByEquality() { + Options optionsWithNoOrderBy1 = Options.fromReadOptions(Options.orderBy(RpcOrderBy.NO_ORDER)); + Options optionsWithNoOrderBy2 = Options.fromReadOptions(Options.orderBy(RpcOrderBy.NO_ORDER)); + assertTrue(optionsWithNoOrderBy1.equals(optionsWithNoOrderBy2)); + + Options optionsWithPkOrder = Options.fromReadOptions(Options.orderBy(RpcOrderBy.PRIMARY_KEY)); + assertFalse(optionsWithNoOrderBy1.equals(optionsWithPkOrder)); + } + @Test public void testQueryOptionsPriority() { RpcPriority priority = RpcPriority.MEDIUM; diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadFormatTestRunner.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadFormatTestRunner.java index 8d97d9d894b..c973b7e471e 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadFormatTestRunner.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadFormatTestRunner.java @@ -114,7 +114,7 @@ private static class TestCaseRunner { } private void run() throws Exception { - stream = new GrpcStreamIterator(10); + stream = new GrpcStreamIterator(10, /*cancelQueryWhenClientIsClosed=*/ false); stream.setCall( new SpannerRpc.StreamingCall() { @Override diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResumableStreamIteratorTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResumableStreamIteratorTest.java index 899bec4c622..d126719ebb8 100644 --- 
a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResumableStreamIteratorTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResumableStreamIteratorTest.java @@ -24,6 +24,7 @@ import static org.mockito.Mockito.when; import com.google.api.client.util.BackOff; +import com.google.cloud.spanner.ErrorHandler.DefaultErrorHandler; import com.google.cloud.spanner.v1.stub.SpannerStubSettings; import com.google.common.collect.AbstractIterator; import com.google.common.collect.ImmutableList; @@ -157,7 +158,8 @@ private void initWithLimit(int maxBufferSize) { maxBufferSize, "", new OpenTelemetrySpan(mock(io.opentelemetry.api.trace.Span.class)), - new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer("")), + new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer(""), false), + DefaultErrorHandler.INSTANCE, SpannerStubSettings.newBuilder().executeStreamingSqlSettings().getRetrySettings(), SpannerStubSettings.newBuilder().executeStreamingSqlSettings().getRetryableCodes()) { @Override diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelMockServerTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelMockServerTest.java new file mode 100644 index 00000000000..b5e3e2e54cf --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/RetryOnDifferentGrpcChannelMockServerTest.java @@ -0,0 +1,362 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static io.grpc.Grpc.TRANSPORT_ATTR_REMOTE_ADDR; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.common.collect.ImmutableSet; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import io.grpc.Attributes; +import io.grpc.Context; +import io.grpc.Deadline; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; +import io.grpc.ServerCall; +import io.grpc.ServerCall.Listener; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; +import io.grpc.Status; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.threeten.bp.Duration; + +@RunWith(JUnit4.class) +public class RetryOnDifferentGrpcChannelMockServerTest extends AbstractMockServerTest { + private static final Map> SERVER_ADDRESSES = new HashMap<>(); + + @BeforeClass + public static void startStaticServer() 
throws IOException { + System.setProperty("spanner.retry_deadline_exceeded_on_different_channel", "true"); + startStaticServer(createServerInterceptor()); + } + + @AfterClass + public static void removeSystemProperty() { + System.clearProperty("spanner.retry_deadline_exceeded_on_different_channel"); + } + + @After + public void clearRequests() { + SERVER_ADDRESSES.clear(); + mockSpanner.clearRequests(); + mockSpanner.removeAllExecutionTimes(); + } + + static ServerInterceptor createServerInterceptor() { + return new ServerInterceptor() { + @Override + public Listener interceptCall( + ServerCall serverCall, + Metadata metadata, + ServerCallHandler serverCallHandler) { + Attributes attributes = serverCall.getAttributes(); + //noinspection unchecked,deprecation + Attributes.Key key = + (Attributes.Key) + attributes.keys().stream() + .filter(k -> k.equals(TRANSPORT_ATTR_REMOTE_ADDR)) + .findFirst() + .orElse(null); + if (key != null) { + InetSocketAddress address = attributes.get(key); + synchronized (SERVER_ADDRESSES) { + Set addresses = + SERVER_ADDRESSES.getOrDefault( + serverCall.getMethodDescriptor().getFullMethodName(), new HashSet<>()); + addresses.add(address); + SERVER_ADDRESSES.putIfAbsent( + serverCall.getMethodDescriptor().getFullMethodName(), addresses); + } + } + return serverCallHandler.startCall(serverCall, metadata); + } + }; + } + + SpannerOptions.Builder createSpannerOptionsBuilder() { + return SpannerOptions.newBuilder() + .setProjectId("my-project") + .setHost(String.format("http://localhost:%d", getPort())) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .setCredentials(NoCredentials.getInstance()); + } + + @Test + public void testReadWriteTransaction_retriesOnNewChannel() { + SpannerOptions.Builder builder = createSpannerOptionsBuilder(); + builder.setSessionPoolOption( + SessionPoolOptions.newBuilder().setWaitForMinSessions(Duration.ofSeconds(5L)).build()); + mockSpanner.setBeginTransactionExecutionTime( + 
SimulatedExecutionTime.ofStickyException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + AtomicInteger attempts = new AtomicInteger(); + + try (Spanner spanner = builder.build().getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + client + .readWriteTransaction() + .run( + transaction -> { + if (attempts.incrementAndGet() > 1) { + mockSpanner.setBeginTransactionExecutionTime( + MockSpannerServiceImpl.NO_EXECUTION_TIME); + } + transaction.buffer(Mutation.newInsertBuilder("foo").set("id").to(1L).build()); + return null; + }); + } + assertEquals(2, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + List requests = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + assertNotEquals(requests.get(0).getSession(), requests.get(1).getSession()); + assertEquals( + 2, + SERVER_ADDRESSES + .getOrDefault("google.spanner.v1.Spanner/BeginTransaction", ImmutableSet.of()) + .size()); + } + + @Test + public void testReadWriteTransaction_stopsRetrying() { + SpannerOptions.Builder builder = createSpannerOptionsBuilder(); + builder.setSessionPoolOption( + SessionPoolOptions.newBuilder().setWaitForMinSessions(Duration.ofSeconds(5L)).build()); + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofStickyException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + + try (Spanner spanner = builder.build().getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + Mutation.newInsertBuilder("foo").set("id").to(1L).build()); + return null; + })); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + + int numChannels = spanner.getOptions().getNumChannels(); + assertEquals(numChannels, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + List requests = + 
mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + Set sessions = + requests.stream().map(BeginTransactionRequest::getSession).collect(Collectors.toSet()); + assertEquals(numChannels, sessions.size()); + assertEquals( + numChannels, + SERVER_ADDRESSES + .getOrDefault("google.spanner.v1.Spanner/BeginTransaction", ImmutableSet.of()) + .size()); + } + } + + @Test + public void testDenyListedChannelIsCleared() { + FakeClock clock = new FakeClock(); + SpannerOptions.Builder builder = createSpannerOptionsBuilder(); + builder.setSessionPoolOption( + SessionPoolOptions.newBuilder() + .setWaitForMinSessions(Duration.ofSeconds(5)) + .setPoolMaintainerClock(clock) + .build()); + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofStickyException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + + try (Spanner spanner = builder.build().getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + + // Retry until all channels have been deny-listed. + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + client + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + Mutation.newInsertBuilder("foo").set("id").to(1L).build()); + return null; + })); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + + // Now advance the clock by 2 minutes. This should clear all deny-listed channels. 
+ clock.currentTimeMillis.addAndGet(TimeUnit.MILLISECONDS.convert(2L, TimeUnit.MINUTES)); + AtomicInteger attempts = new AtomicInteger(); + client + .readWriteTransaction() + .run( + transaction -> { + if (attempts.incrementAndGet() > 1) { + mockSpanner.setBeginTransactionExecutionTime(SimulatedExecutionTime.none()); + } + transaction.buffer(Mutation.newInsertBuilder("foo").set("id").to(1L).build()); + return null; + }); + + int numChannels = spanner.getOptions().getNumChannels(); + // We should have numChannels BeginTransactionRequests from the first transaction, and 2 from + // the second transaction. + assertEquals(numChannels + 2, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + List requests = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class); + // The requests should all use different sessions, as deny-listing a session will bring it to + // the back of the session pool. + Set sessions = + requests.stream().map(BeginTransactionRequest::getSession).collect(Collectors.toSet()); + // We should have used numChannels+1==5 sessions. The reason for that is that first 3 attempts + // of the first transaction used 3 different sessions, that were then all deny-listed. The + // 4th attempt also failed, but as it would be the last channel to be deny-listed, it was not + // deny-listed and instead added to the front of the pool. + // The first attempt of the second transaction then uses the same session as the last attempt + // of the first transaction. That fails, the session is deny-listed, the transaction is + // retried on yet another session and succeeds. 
+ assertEquals(numChannels + 1, sessions.size()); + assertEquals( + numChannels, + SERVER_ADDRESSES + .getOrDefault("google.spanner.v1.Spanner/BeginTransaction", ImmutableSet.of()) + .size()); + assertEquals(numChannels, mockSpanner.countRequestsOfType(BatchCreateSessionsRequest.class)); + } + } + + @Test + public void testSingleUseQuery_retriesOnNewChannel() { + SpannerOptions.Builder builder = createSpannerOptionsBuilder(); + builder.setSessionPoolOption( + SessionPoolOptions.newBuilder().setUseMultiplexedSession(true).build()); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + + try (Spanner spanner = builder.build().getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT1_STATEMENT)) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(0)); + assertFalse(resultSet.next()); + } + } + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + // The requests use the same multiplexed session. + assertEquals(requests.get(0).getSession(), requests.get(1).getSession()); + // The requests use two different gRPC channels. 
+ assertEquals( + 2, + SERVER_ADDRESSES + .getOrDefault("google.spanner.v1.Spanner/ExecuteStreamingSql", ImmutableSet.of()) + .size()); + } + + @Test + public void testSingleUseQuery_stopsRetrying() { + SpannerOptions.Builder builder = createSpannerOptionsBuilder(); + builder.setSessionPoolOption( + SessionPoolOptions.newBuilder().setUseMultiplexedSession(true).build()); + mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofStickyException(Status.DEADLINE_EXCEEDED.asRuntimeException())); + + try (Spanner spanner = builder.build().getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + try (ResultSet resultSet = client.singleUse().executeQuery(SELECT1_STATEMENT)) { + SpannerException exception = assertThrows(SpannerException.class, resultSet::next); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + } + int numChannels = spanner.getOptions().getNumChannels(); + assertEquals(numChannels, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + List requests = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class); + // The requests use the same multiplexed session. + String session = requests.get(0).getSession(); + for (ExecuteSqlRequest request : requests) { + assertEquals(session, request.getSession()); + } + // The requests use all gRPC channels. 
+ assertEquals( + numChannels, + SERVER_ADDRESSES + .getOrDefault("google.spanner.v1.Spanner/ExecuteStreamingSql", ImmutableSet.of()) + .size()); + } + } + + @Test + public void testReadWriteTransaction_withGrpcContextDeadline_doesNotRetry() { + SpannerOptions.Builder builder = createSpannerOptionsBuilder(); + builder.setSessionPoolOption( + SessionPoolOptions.newBuilder().setWaitForMinSessions(Duration.ofSeconds(5L)).build()); + mockSpanner.setBeginTransactionExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(500, 500)); + + try (Spanner spanner = builder.build().getService()) { + DatabaseClient client = spanner.getDatabaseClient(DatabaseId.of("p", "i", "d")); + ScheduledExecutorService service = Executors.newScheduledThreadPool(1); + Context context = + Context.current().withDeadline(Deadline.after(50L, TimeUnit.MILLISECONDS), service); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + context.run( + () -> + client + .readWriteTransaction() + .run( + transaction -> { + transaction.buffer( + Mutation.newInsertBuilder("foo").set("id").to(1L).build()); + return null; + }))); + assertEquals(ErrorCode.DEADLINE_EXCEEDED, exception.getErrorCode()); + } + // A gRPC context deadline will still cause the underlying error handler to try to retry the + // transaction on a new channel, but as the deadline has been exceeded even before those RPCs + // are being executed, the RPC invocation will be skipped, and the error will eventually bubble + // up. 
+ assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionClientTests.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionClientTests.java index c0ae8de97c9..bcba430c521 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionClientTests.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionClientTests.java @@ -92,7 +92,7 @@ public static Collection data() { @Mock private SpannerRpc rpc; @Mock private SpannerOptions spannerOptions; private final TraceWrapper tracer = - new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer("")); + new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer(""), false); @Mock private ISpan span; @Captor ArgumentCaptor> options; diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionImplTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionImplTest.java index 72befe8a2b4..2a850514d0d 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionImplTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionImplTest.java @@ -138,6 +138,8 @@ public void setUp() { when(rpc.getExecuteQueryRetryableCodes()) .thenReturn( SpannerStubSettings.newBuilder().executeStreamingSqlSettings().getRetryableCodes()); + when(rpc.getCommitRetrySettings()) + .thenReturn(SpannerStubSettings.newBuilder().commitSettings().getRetrySettings()); session = spanner.getSessionClient(db).createSession(); Span oTspan = mock(Span.class); ISpan span = new OpenTelemetrySpan(oTspan); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolMaintainerTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolMaintainerTest.java index 9e55851ef4d..db4e79113fc 100644 --- 
a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolMaintainerTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolMaintainerTest.java @@ -134,7 +134,7 @@ private SessionPool createPool(SessionPoolOptions options) throws Exception { client.getSessionClient(db), clock, Position.FIRST, - new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer("")), + new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer(""), false), OpenTelemetry.noop()); pool.idleSessionRemovedListener = input -> { diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolOptionsTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolOptionsTest.java index 76123d0ac68..2e3e2c85da3 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolOptionsTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolOptionsTest.java @@ -28,6 +28,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.concurrent.ThreadLocalRandom; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @@ -169,6 +170,36 @@ public void setCloseIfInactiveTransactions() { assertTrue(sessionPoolOptions.closeInactiveTransactions()); } + @Test + public void testSetUsedSessionsRatioThreshold() { + double threshold = ThreadLocalRandom.current().nextDouble(); + InactiveTransactionRemovalOptions inactiveTransactionRemovalOptions = + InactiveTransactionRemovalOptions.newBuilder() + .setUsedSessionsRatioThreshold(threshold) + .build(); + assertEquals( + threshold, inactiveTransactionRemovalOptions.getUsedSessionsRatioThreshold(), 0.0d); + } + + @Test + public void testBlockIfPoolExhausted() { + assertTrue(SessionPoolOptions.newBuilder().build().isBlockIfPoolExhausted()); + assertTrue( + 
SessionPoolOptions.newBuilder().setBlockIfPoolExhausted().build().isBlockIfPoolExhausted()); + assertFalse( + SessionPoolOptions.newBuilder().setFailIfPoolExhausted().build().isBlockIfPoolExhausted()); + } + + @Test + public void testFailIfSessionNotFound() { + assertFalse(SessionPoolOptions.newBuilder().build().isFailIfSessionNotFound()); + assertTrue( + SessionPoolOptions.newBuilder() + .setFailIfSessionNotFound() + .build() + .isFailIfSessionNotFound()); + } + @Test(expected = IllegalArgumentException.class) public void setNegativeExecutionFrequency() { InactiveTransactionRemovalOptions inactiveTransactionRemovalOptions = @@ -268,6 +299,52 @@ public void testUseMultiplexedSession() { .getUseMultiplexedSession()); } + @Test + public void testUseMultiplexedSessionForRW() { + // skip these tests since this configuration can have dual behaviour in different test-runners + assumeFalse(SessionPoolOptions.newBuilder().build().getUseMultiplexedSession()); + assumeFalse(SessionPoolOptions.newBuilder().build().getUseMultiplexedSessionForRW()); + + // Verify default client behavior for multiplexed sessions in R/W transactions + assertEquals(false, SessionPoolOptions.newBuilder().build().getUseMultiplexedSessionForRW()); + + // Client will use multiplexed sessions for R/W transactions if both the fields are set to true. + assertEquals( + true, + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(true) + .setUseMultiplexedSessionForRW(true) + .build() + .getUseMultiplexedSessionForRW()); + // Client will not use multiplexed sessions for R/W transactions, since one of the field is set + // to false. + assertEquals( + false, + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(true) + .setUseMultiplexedSessionForRW(false) + .build() + .getUseMultiplexedSessionForRW()); + // Client will not use multiplexed sessions for R/W transactions, since one of the field is set + // to false. 
+ assertEquals( + false, + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .setUseMultiplexedSessionForRW(true) + .build() + .getUseMultiplexedSessionForRW()); + // Client will not use multiplexed sessions for R/W transactions, since both the fields are set + // to false. + assertEquals( + false, + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(false) + .setUseMultiplexedSessionForRW(false) + .build() + .getUseMultiplexedSessionForRW()); + } + @Test public void testMultiplexedSessionMaintenanceDuration() { assertEquals( @@ -287,4 +364,99 @@ public void testMultiplexedSessionMaintenanceDuration() { .build() .getMultiplexedSessionMaintenanceDuration()); } + + @Test + public void testToBuilder() { + assertToBuilderRoundtrip(SessionPoolOptions.newBuilder().build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSession(ThreadLocalRandom.current().nextBoolean()) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setUseMultiplexedSessionForRW(ThreadLocalRandom.current().nextBoolean()) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setMinSessions(ThreadLocalRandom.current().nextInt(400)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setMaxSessions(ThreadLocalRandom.current().nextInt(1, 1000)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setIncStep(ThreadLocalRandom.current().nextInt(1, 1000)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setMaxIdleSessions(ThreadLocalRandom.current().nextInt(1000)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setWriteSessionsFraction(ThreadLocalRandom.current().nextFloat()) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setInactiveTransactionRemovalOptions( + InactiveTransactionRemovalOptions.newBuilder() + 
.setUsedSessionsRatioThreshold(ThreadLocalRandom.current().nextDouble()) + .build()) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setLoopFrequency(ThreadLocalRandom.current().nextInt(1000)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setMultiplexedSessionMaintenanceLoopFrequency( + java.time.Duration.ofMillis(ThreadLocalRandom.current().nextInt(1000))) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setKeepAliveIntervalMinutes(ThreadLocalRandom.current().nextInt(60)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setRemoveInactiveSessionAfter( + Duration.ofMillis(ThreadLocalRandom.current().nextLong(10000))) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder().setCloseIfInactiveTransactions().build()); + assertToBuilderRoundtrip(SessionPoolOptions.newBuilder().setFailOnSessionLeak().build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setTrackStackTraceOfSessionCheckout(ThreadLocalRandom.current().nextBoolean()) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setInitialWaitForSessionTimeoutMillis(ThreadLocalRandom.current().nextLong(1000)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setAutoDetectDialect(ThreadLocalRandom.current().nextBoolean()) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setWaitForMinSessions(Duration.ofMillis(ThreadLocalRandom.current().nextLong(10000))) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setAcquireSessionTimeout( + Duration.ofMillis(ThreadLocalRandom.current().nextLong(1, 10000))) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + .setRandomizePositionQPSThreshold(ThreadLocalRandom.current().nextLong(10000)) + .build()); + assertToBuilderRoundtrip( + SessionPoolOptions.newBuilder() + 
.setMultiplexedSessionMaintenanceDuration( + Duration.ofMillis(ThreadLocalRandom.current().nextLong(10000))) + .build()); + } + + static void assertToBuilderRoundtrip(SessionPoolOptions options) { + assertEquals(options, options.toBuilder().build()); + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolStressTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolStressTest.java index 6d2d1f19efe..33771962828 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolStressTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolStressTest.java @@ -228,7 +228,7 @@ public void stressTest() throws Exception { mockSpanner.getSessionClient(db), clock, Position.RANDOM, - new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer("")), + new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer(""), false), OpenTelemetry.noop()); pool.idleSessionRemovedListener = pooled -> { diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolTest.java index ab7eb80cf90..998678e4296 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionPoolTest.java @@ -59,12 +59,12 @@ import com.google.api.core.ApiFutures; import com.google.cloud.Timestamp; +import com.google.cloud.spanner.ErrorHandler.DefaultErrorHandler; import com.google.cloud.spanner.MetricRegistryTestUtils.FakeMetricRegistry; import com.google.cloud.spanner.MetricRegistryTestUtils.MetricsRecord; import com.google.cloud.spanner.MetricRegistryTestUtils.PointWithFunction; import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; import com.google.cloud.spanner.SessionClient.SessionConsumer; -import 
com.google.cloud.spanner.SessionPool.MultiplexedSessionInitializationConsumer; import com.google.cloud.spanner.SessionPool.PooledSession; import com.google.cloud.spanner.SessionPool.PooledSessionFuture; import com.google.cloud.spanner.SessionPool.Position; @@ -75,6 +75,7 @@ import com.google.cloud.spanner.spi.v1.SpannerRpc; import com.google.cloud.spanner.spi.v1.SpannerRpc.ResultStreamConsumer; import com.google.cloud.spanner.v1.stub.SpannerStubSettings; +import com.google.common.base.Stopwatch; import com.google.common.collect.Lists; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.Uninterruptibles; @@ -153,7 +154,7 @@ public class SessionPoolTest extends BaseSessionPoolTest { private String TEST_DATABASE_ROLE = "my-role"; private final TraceWrapper tracer = - new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer("")); + new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer(""), false); @Parameters(name = "min sessions = {0}") public static Collection data() { @@ -1294,7 +1295,7 @@ public void blockAndTimeoutOnPoolExhaustion_withAcquireSessionTimeout() throws E .setMinSessions(minSessions) .setMaxSessions(1) .setInitialWaitForSessionTimeoutMillis(20L) - .setAcquireSessionTimeout(Duration.ofMillis(20L)) + .setAcquireSessionTimeout(null) .build(); setupMockSessionCreation(); pool = createPool(); @@ -1307,18 +1308,18 @@ public void blockAndTimeoutOnPoolExhaustion_withAcquireSessionTimeout() throws E Future fut = executor.submit( () -> { - latch.countDown(); PooledSessionFuture session = pool.getSession(); + latch.countDown(); + session.get(); session.close(); return null; }); // Wait until the background thread is actually waiting for a session. latch.await(); // Wait until the request has timed out. 
- int waitCount = 0; - while (pool.getNumWaiterTimeouts() == 0L && waitCount < 5000) { - Thread.sleep(1L); - waitCount++; + Stopwatch watch = Stopwatch.createStarted(); + while (pool.getNumWaiterTimeouts() == 0L && watch.elapsed(TimeUnit.MILLISECONDS) < 1000) { + Thread.yield(); } // Return the checked out session to the pool so the async request will get a session and // finish. @@ -1326,10 +1327,11 @@ public void blockAndTimeoutOnPoolExhaustion_withAcquireSessionTimeout() throws E // Verify that the async request also succeeds. fut.get(10L, TimeUnit.SECONDS); executor.shutdown(); + assertTrue(executor.awaitTermination(10L, TimeUnit.SECONDS)); // Verify that the session was returned to the pool and that we can get it again. - Session session = pool.getSession(); - assertThat(session).isNotNull(); + PooledSessionFuture session = pool.getSession(); + assertThat(session.get()).isNotNull(); session.close(); assertThat(pool.getNumWaiterTimeouts()).isAtLeast(1L); } @@ -1477,6 +1479,7 @@ public void testSessionNotFoundReadWriteTransaction() { final SessionImpl closedSession = mock(SessionImpl.class); when(closedSession.getName()) .thenReturn("projects/dummy/instances/dummy/database/dummy/sessions/session-closed"); + when(closedSession.getErrorHandler()).thenReturn(DefaultErrorHandler.INSTANCE); Span oTspan = mock(Span.class); ISpan span = new OpenTelemetrySpan(oTspan); @@ -1494,13 +1497,14 @@ public void testSessionNotFoundReadWriteTransaction() { .thenReturn(ApiFutures.immediateFuture(Empty.getDefaultInstance())); when(closedSession.newTransaction(Options.fromTransactionOptions())) .thenReturn(closedTransactionContext); - when(closedSession.beginTransactionAsync(any(), eq(true))).thenThrow(sessionNotFound); + when(closedSession.beginTransactionAsync(any(), eq(true), any())).thenThrow(sessionNotFound); when(closedSession.getTracer()).thenReturn(tracer); TransactionRunnerImpl closedTransactionRunner = new TransactionRunnerImpl(closedSession); 
closedTransactionRunner.setSpan(span); when(closedSession.readWriteTransaction()).thenReturn(closedTransactionRunner); final SessionImpl openSession = mock(SessionImpl.class); + when(openSession.getErrorHandler()).thenReturn(DefaultErrorHandler.INSTANCE); when(openSession.asyncClose()) .thenReturn(ApiFutures.immediateFuture(Empty.getDefaultInstance())); when(openSession.getName()) @@ -1508,7 +1512,7 @@ public void testSessionNotFoundReadWriteTransaction() { final TransactionContextImpl openTransactionContext = mock(TransactionContextImpl.class); when(openSession.newTransaction(Options.fromTransactionOptions())) .thenReturn(openTransactionContext); - when(openSession.beginTransactionAsync(any(), eq(true))) + when(openSession.beginTransactionAsync(any(), eq(true), any())) .thenReturn(ApiFutures.immediateFuture(ByteString.copyFromUtf8("open-txn"))); when(openSession.getTracer()).thenReturn(tracer); TransactionRunnerImpl openTransactionRunner = new TransactionRunnerImpl(openSession); @@ -2023,14 +2027,16 @@ public void testOpenCensusMetricsDisable() { public void testOpenTelemetrySessionMetrics() throws Exception { SpannerOptions.resetActiveTracingFramework(); SpannerOptions.enableOpenTelemetryMetrics(); - // Create a session pool with max 2 session and a low timeout for waiting for a session. + // Create a session pool with max 3 session and a low timeout for waiting for a session. if (minSessions == 1) { options = SessionPoolOptions.newBuilder() .setMinSessions(1) .setMaxSessions(3) - .setMaxIdleSessions(0) - .setInitialWaitForSessionTimeoutMillis(50L) + // This must be set to null for the setInitialWaitForSessionTimeoutMillis call to have + // any effect. 
+ .setAcquireSessionTimeout(null) + .setInitialWaitForSessionTimeoutMillis(1L) .build(); FakeClock clock = new FakeClock(); clock.currentTimeMillis.set(System.currentTimeMillis()); @@ -2081,26 +2087,29 @@ public void testOpenTelemetrySessionMetrics() throws Exception { Future fut = executor.submit( () -> { + PooledSessionFuture session = pool.getSession(); latch.countDown(); - Session session = pool.getSession(); + session.get(); session.close(); return null; }); // Wait until the background thread is actually waiting for a session. latch.await(); // Wait until the request has timed out. - int waitCount = 0; - while (pool.getNumWaiterTimeouts() == 0L && waitCount < 1000) { - Thread.sleep(5L); - waitCount++; + Stopwatch watch = Stopwatch.createStarted(); + while (pool.getNumWaiterTimeouts() == 0L && watch.elapsed(TimeUnit.MILLISECONDS) < 100) { + Thread.yield(); } + assertTrue(pool.getNumWaiterTimeouts() > 0); // Return the checked out session to the pool so the async request will get a session and // finish. session2.close(); // Verify that the async request also succeeds. 
fut.get(10L, TimeUnit.SECONDS); executor.shutdown(); + assertTrue(executor.awaitTermination(10L, TimeUnit.SECONDS)); + inMemoryMetricReader.forceFlush(); metricDataCollection = inMemoryMetricReader.collectAllMetrics(); // Max Allowed sessions should be 3 @@ -2212,16 +2221,6 @@ public void testWaitOnMinSessionsWhenSessionsAreCreatedBeforeTimeout() { })) .when(sessionClient) .asyncBatchCreateSessions(Mockito.eq(1), Mockito.anyBoolean(), any(SessionConsumer.class)); - doAnswer( - invocation -> - executor.submit( - () -> { - MultiplexedSessionInitializationConsumer consumer = - invocation.getArgument(0, MultiplexedSessionInitializationConsumer.class); - consumer.onSessionReady(mockMultiplexedSession()); - })) - .when(sessionClient) - .asyncCreateMultiplexedSession(any(MultiplexedSessionInitializationConsumer.class)); pool = createPool(new FakeClock(), new FakeMetricRegistry(), SPANNER_DEFAULT_LABEL_VALUES); pool.maybeWaitOnMinSessions(); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SingerProto.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SingerProto.java index 56fb82c15d2..c409f34177b 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SingerProto.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SingerProto.java @@ -1,5 +1,5 @@ /* - * Copyright 2022 Google LLC + * Copyright 2024 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,6 +16,8 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: src/test/resources/com/google/cloud/spanner/singer.proto + +// Protobuf Java Version: 3.25.1 package com.google.cloud.spanner; public final class SingerProto { @@ -26,7 +28,7 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLi public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); } - /** Protobuf enum {@code spanner.examples.music.Genre} */ + /** Protobuf enum {@code examples.spanner.music.Genre} */ public enum Genre implements com.google.protobuf.ProtocolMessageEnum { /** POP = 0; */ POP(0), @@ -36,6 +38,7 @@ public enum Genre implements com.google.protobuf.ProtocolMessageEnum { FOLK(2), /** ROCK = 3; */ ROCK(3), + UNRECOGNIZED(-1), ; /** POP = 0; */ @@ -48,6 +51,9 @@ public enum Genre implements com.google.protobuf.ProtocolMessageEnum { public static final int ROCK_VALUE = 3; public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new IllegalArgumentException("Can't get the number of an unknown enum value."); + } return value; } @@ -56,7 +62,7 @@ public final int getNumber() { * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. 
*/ - @java.lang.Deprecated + @Deprecated public static Genre valueOf(int value) { return forNumber(value); } @@ -92,6 +98,9 @@ public Genre findValueByNumber(int number) { }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new IllegalStateException("Can't get the descriptor of an unrecognized enum value."); + } return getDescriptor().getValues().get(ordinal()); } @@ -100,14 +109,17 @@ public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return com.google.cloud.spanner.SingerProto.getDescriptor().getEnumTypes().get(0); + return SingerProto.getDescriptor().getEnumTypes().get(0); } private static final Genre[] VALUES = values(); public static Genre valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + throw new IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; } return VALUES[desc.getIndex()]; } @@ -118,12 +130,12 @@ private Genre(int value) { this.value = value; } - // @@protoc_insertion_point(enum_scope:spanner.examples.music.Genre) + // @@protoc_insertion_point(enum_scope:examples.spanner.music.Genre) } public interface SingerInfoOrBuilder extends - // @@protoc_insertion_point(interface_extends:spanner.examples.music.SingerInfo) + // @@protoc_insertion_point(interface_extends:examples.spanner.music.SingerInfo) com.google.protobuf.MessageOrBuilder { /** @@ -150,7 +162,7 @@ public interface SingerInfoOrBuilder * * @return The birthDate. */ - java.lang.String getBirthDate(); + String getBirthDate(); /** * optional string birth_date = 2; * @@ -169,7 +181,7 @@ public interface SingerInfoOrBuilder * * @return The nationality. 
*/ - java.lang.String getNationality(); + String getNationality(); /** * optional string nationality = 3; * @@ -178,22 +190,28 @@ public interface SingerInfoOrBuilder com.google.protobuf.ByteString getNationalityBytes(); /** - * optional .spanner.examples.music.Genre genre = 4; + * optional .examples.spanner.music.Genre genre = 4; * * @return Whether the genre field is set. */ boolean hasGenre(); /** - * optional .spanner.examples.music.Genre genre = 4; + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The enum numeric value on the wire for genre. + */ + int getGenreValue(); + /** + * optional .examples.spanner.music.Genre genre = 4; * * @return The genre. */ - com.google.cloud.spanner.SingerProto.Genre getGenre(); + Genre getGenre(); } - /** Protobuf type {@code spanner.examples.music.SingerInfo} */ + /** Protobuf type {@code examples.spanner.music.SingerInfo} */ public static final class SingerInfo extends com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:spanner.examples.music.SingerInfo) + // @@protoc_insertion_point(message_implements:examples.spanner.music.SingerInfo) SingerInfoOrBuilder { private static final long serialVersionUID = 0L; // Use SingerInfo.newBuilder() to construct. 
@@ -207,113 +225,31 @@ private SingerInfo() { genre_ = 0; } - @java.lang.Override + @Override @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + protected Object newInstance(UnusedPrivateParameter unused) { return new SingerInfo(); } - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private SingerInfo( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 8: - { - bitField0_ |= 0x00000001; - singerId_ = input.readInt64(); - break; - } - case 18: - { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000002; - birthDate_ = bs; - break; - } - case 26: - { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000004; - nationality_ = bs; - break; - } - case 32: - { - int rawValue = input.readEnum(); - @SuppressWarnings("deprecation") - com.google.cloud.spanner.SingerProto.Genre value = - com.google.cloud.spanner.SingerProto.Genre.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(4, rawValue); - } else { - bitField0_ |= 0x00000008; - genre_ = rawValue; - } - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new 
com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.spanner.SingerProto - .internal_static_spanner_examples_music_SingerInfo_descriptor; + return SingerProto.internal_static_examples_spanner_music_SingerInfo_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.spanner.SingerProto - .internal_static_spanner_examples_music_SingerInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.spanner.SingerProto.SingerInfo.class, - com.google.cloud.spanner.SingerProto.SingerInfo.Builder.class); + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return SingerProto.internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized(SingerInfo.class, Builder.class); } private int bitField0_; public static final int SINGER_ID_FIELD_NUMBER = 1; - private long singerId_; + private long singerId_ = 0L; /** * optional int64 singer_id = 1; * * @return Whether the singerId field is set. */ - @java.lang.Override + @Override public boolean hasSingerId() { return ((bitField0_ & 0x00000001) != 0); } @@ -322,19 +258,21 @@ public boolean hasSingerId() { * * @return The singerId. */ - @java.lang.Override + @Override public long getSingerId() { return singerId_; } public static final int BIRTH_DATE_FIELD_NUMBER = 2; - private volatile java.lang.Object birthDate_; + + @SuppressWarnings("serial") + private volatile Object birthDate_ = ""; /** * optional string birth_date = 2; * * @return Whether the birthDate field is set. 
*/ - @java.lang.Override + @Override public boolean hasBirthDate() { return ((bitField0_ & 0x00000002) != 0); } @@ -343,17 +281,15 @@ public boolean hasBirthDate() { * * @return The birthDate. */ - @java.lang.Override - public java.lang.String getBirthDate() { - java.lang.Object ref = birthDate_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; + @Override + public String getBirthDate() { + Object ref = birthDate_; + if (ref instanceof String) { + return (String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - birthDate_ = s; - } + String s = bs.toStringUtf8(); + birthDate_ = s; return s; } } @@ -362,12 +298,12 @@ public java.lang.String getBirthDate() { * * @return The bytes for birthDate. */ - @java.lang.Override + @Override public com.google.protobuf.ByteString getBirthDateBytes() { - java.lang.Object ref = birthDate_; - if (ref instanceof java.lang.String) { + Object ref = birthDate_; + if (ref instanceof String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + com.google.protobuf.ByteString.copyFromUtf8((String) ref); birthDate_ = b; return b; } else { @@ -376,13 +312,15 @@ public com.google.protobuf.ByteString getBirthDateBytes() { } public static final int NATIONALITY_FIELD_NUMBER = 3; - private volatile java.lang.Object nationality_; + + @SuppressWarnings("serial") + private volatile Object nationality_ = ""; /** * optional string nationality = 3; * * @return Whether the nationality field is set. */ - @java.lang.Override + @Override public boolean hasNationality() { return ((bitField0_ & 0x00000004) != 0); } @@ -391,17 +329,15 @@ public boolean hasNationality() { * * @return The nationality. 
*/ - @java.lang.Override - public java.lang.String getNationality() { - java.lang.Object ref = nationality_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; + @Override + public String getNationality() { + Object ref = nationality_; + if (ref instanceof String) { + return (String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - nationality_ = s; - } + String s = bs.toStringUtf8(); + nationality_ = s; return s; } } @@ -410,12 +346,12 @@ public java.lang.String getNationality() { * * @return The bytes for nationality. */ - @java.lang.Override + @Override public com.google.protobuf.ByteString getNationalityBytes() { - java.lang.Object ref = nationality_; - if (ref instanceof java.lang.String) { + Object ref = nationality_; + if (ref instanceof String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + com.google.protobuf.ByteString.copyFromUtf8((String) ref); nationality_ = b; return b; } else { @@ -424,32 +360,39 @@ public com.google.protobuf.ByteString getNationalityBytes() { } public static final int GENRE_FIELD_NUMBER = 4; - private int genre_; + private int genre_ = 0; /** - * optional .spanner.examples.music.Genre genre = 4; + * optional .examples.spanner.music.Genre genre = 4; * * @return Whether the genre field is set. */ - @java.lang.Override + @Override public boolean hasGenre() { return ((bitField0_ & 0x00000008) != 0); } /** - * optional .spanner.examples.music.Genre genre = 4; + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The enum numeric value on the wire for genre. + */ + @Override + public int getGenreValue() { + return genre_; + } + /** + * optional .examples.spanner.music.Genre genre = 4; * * @return The genre. 
*/ - @java.lang.Override - public com.google.cloud.spanner.SingerProto.Genre getGenre() { - @SuppressWarnings("deprecation") - com.google.cloud.spanner.SingerProto.Genre result = - com.google.cloud.spanner.SingerProto.Genre.valueOf(genre_); - return result == null ? com.google.cloud.spanner.SingerProto.Genre.POP : result; + @Override + public Genre getGenre() { + Genre result = Genre.forNumber(genre_); + return result == null ? Genre.UNRECOGNIZED : result; } private byte memoizedIsInitialized = -1; - @java.lang.Override + @Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; @@ -459,7 +402,7 @@ public final boolean isInitialized() { return true; } - @java.lang.Override + @Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) != 0)) { output.writeInt64(1, singerId_); @@ -473,10 +416,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (((bitField0_ & 0x00000008) != 0)) { output.writeEnum(4, genre_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + @Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; @@ -494,21 +437,20 @@ public int getSerializedSize() { if (((bitField0_ & 0x00000008) != 0)) { size += com.google.protobuf.CodedOutputStream.computeEnumSize(4, genre_); } - size += unknownFields.getSerializedSize(); + size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } - @java.lang.Override - public boolean equals(final java.lang.Object obj) { + @Override + public boolean equals(final Object obj) { if (obj == this) { return true; } - if (!(obj instanceof com.google.cloud.spanner.SingerProto.SingerInfo)) { + if (!(obj instanceof SingerInfo)) { return super.equals(obj); } - com.google.cloud.spanner.SingerProto.SingerInfo other = - 
(com.google.cloud.spanner.SingerProto.SingerInfo) obj; + SingerInfo other = (SingerInfo) obj; if (hasSingerId() != other.hasSingerId()) return false; if (hasSingerId()) { @@ -526,11 +468,11 @@ public boolean equals(final java.lang.Object obj) { if (hasGenre()) { if (genre_ != other.genre_) return false; } - if (!unknownFields.equals(other.unknownFields)) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } - @java.lang.Override + @Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; @@ -553,76 +495,74 @@ public int hashCode() { hash = (37 * hash) + GENRE_FIELD_NUMBER; hash = (53 * hash) + genre_; } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static com.google.cloud.spanner.SingerProto.SingerInfo parseFrom( - java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + public static SingerInfo parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static com.google.cloud.spanner.SingerProto.SingerInfo parseFrom( + public static SingerInfo parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static com.google.cloud.spanner.SingerProto.SingerInfo parseFrom( - com.google.protobuf.ByteString data) + public static SingerInfo parseFrom(com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static com.google.cloud.spanner.SingerProto.SingerInfo parseFrom( + public static SingerInfo parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static com.google.cloud.spanner.SingerProto.SingerInfo parseFrom(byte[] data) + public static SingerInfo parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static com.google.cloud.spanner.SingerProto.SingerInfo parseFrom( + public static SingerInfo parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static com.google.cloud.spanner.SingerProto.SingerInfo parseFrom( - java.io.InputStream input) throws java.io.IOException { + public static SingerInfo parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static com.google.cloud.spanner.SingerProto.SingerInfo parseFrom( + public static SingerInfo parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } - public static com.google.cloud.spanner.SingerProto.SingerInfo parseDelimitedFrom( - java.io.InputStream input) throws java.io.IOException { + public static SingerInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } - public static com.google.cloud.spanner.SingerProto.SingerInfo parseDelimitedFrom( + public static SingerInfo parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } - public static 
com.google.cloud.spanner.SingerProto.SingerInfo parseFrom( - com.google.protobuf.CodedInputStream input) throws java.io.IOException { + public static SingerInfo parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } - public static com.google.cloud.spanner.SingerProto.SingerInfo parseFrom( + public static SingerInfo parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -630,7 +570,7 @@ public static com.google.cloud.spanner.SingerProto.SingerInfo parseFrom( PARSER, input, extensionRegistry); } - @java.lang.Override + @Override public Builder newBuilderForType() { return newBuilder(); } @@ -639,94 +579,84 @@ public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } - public static Builder newBuilder(com.google.cloud.spanner.SingerProto.SingerInfo prototype) { + public static Builder newBuilder(SingerInfo prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } - @java.lang.Override + @Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + @Override + protected Builder newBuilderForType(BuilderParent parent) { Builder builder = new Builder(parent); return builder; } - /** Protobuf type {@code spanner.examples.music.SingerInfo} */ + /** Protobuf type {@code examples.spanner.music.SingerInfo} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:spanner.examples.music.SingerInfo) - com.google.cloud.spanner.SingerProto.SingerInfoOrBuilder { + // @@protoc_insertion_point(builder_implements:examples.spanner.music.SingerInfo) + SingerInfoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.spanner.SingerProto - .internal_static_spanner_examples_music_SingerInfo_descriptor; + return SingerProto.internal_static_examples_spanner_music_SingerInfo_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.spanner.SingerProto - .internal_static_spanner_examples_music_SingerInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.spanner.SingerProto.SingerInfo.class, - com.google.cloud.spanner.SingerProto.SingerInfo.Builder.class); + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return SingerProto.internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized(SingerInfo.class, Builder.class); } // Construct using com.google.cloud.spanner.SingerProto.SingerInfo.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } + private Builder() {} - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + private Builder(BuilderParent parent) { 
super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} } - @java.lang.Override + @Override public Builder clear() { super.clear(); + bitField0_ = 0; singerId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); birthDate_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); nationality_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); genre_ = 0; - bitField0_ = (bitField0_ & ~0x00000008); return this; } - @java.lang.Override + @Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.spanner.SingerProto - .internal_static_spanner_examples_music_SingerInfo_descriptor; + return SingerProto.internal_static_examples_spanner_music_SingerInfo_descriptor; } - @java.lang.Override - public com.google.cloud.spanner.SingerProto.SingerInfo getDefaultInstanceForType() { - return com.google.cloud.spanner.SingerProto.SingerInfo.getDefaultInstance(); + @Override + public SingerInfo getDefaultInstanceForType() { + return SingerInfo.getDefaultInstance(); } - @java.lang.Override - public com.google.cloud.spanner.SingerProto.SingerInfo build() { - com.google.cloud.spanner.SingerProto.SingerInfo result = buildPartial(); + @Override + public SingerInfo build() { + SingerInfo result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - @java.lang.Override - public com.google.cloud.spanner.SingerProto.SingerInfo buildPartial() { - com.google.cloud.spanner.SingerProto.SingerInfo result = - new com.google.cloud.spanner.SingerProto.SingerInfo(this); + @Override + public SingerInfo buildPartial() { + SingerInfo result = new SingerInfo(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(SingerInfo result) { int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 
0x00000001) != 0)) { @@ -734,113 +664,144 @@ public com.google.cloud.spanner.SingerProto.SingerInfo buildPartial() { to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000002) != 0)) { + result.birthDate_ = birthDate_; to_bitField0_ |= 0x00000002; } - result.birthDate_ = birthDate_; if (((from_bitField0_ & 0x00000004) != 0)) { + result.nationality_ = nationality_; to_bitField0_ |= 0x00000004; } - result.nationality_ = nationality_; if (((from_bitField0_ & 0x00000008) != 0)) { + result.genre_ = genre_; to_bitField0_ |= 0x00000008; } - result.genre_ = genre_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; + result.bitField0_ |= to_bitField0_; } - @java.lang.Override + @Override public Builder clone() { return super.clone(); } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + @Override + public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return super.setField(field, value); } - @java.lang.Override + @Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } - @java.lang.Override + @Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } - @java.lang.Override + @Override public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, - java.lang.Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { return super.setRepeatedField(field, index, value); } - @java.lang.Override + @Override public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return super.addRepeatedField(field, value); } - @java.lang.Override + @Override public Builder 
mergeFrom(com.google.protobuf.Message other) { - if (other instanceof com.google.cloud.spanner.SingerProto.SingerInfo) { - return mergeFrom((com.google.cloud.spanner.SingerProto.SingerInfo) other); + if (other instanceof SingerInfo) { + return mergeFrom((SingerInfo) other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(com.google.cloud.spanner.SingerProto.SingerInfo other) { - if (other == com.google.cloud.spanner.SingerProto.SingerInfo.getDefaultInstance()) - return this; + public Builder mergeFrom(SingerInfo other) { + if (other == SingerInfo.getDefaultInstance()) return this; if (other.hasSingerId()) { setSingerId(other.getSingerId()); } if (other.hasBirthDate()) { - bitField0_ |= 0x00000002; birthDate_ = other.birthDate_; + bitField0_ |= 0x00000002; onChanged(); } if (other.hasNationality()) { - bitField0_ |= 0x00000004; nationality_ = other.nationality_; + bitField0_ |= 0x00000004; onChanged(); } if (other.hasGenre()) { setGenre(other.getGenre()); } - this.mergeUnknownFields(other.unknownFields); + this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } - @java.lang.Override + @Override public final boolean isInitialized() { return true; } - @java.lang.Override + @Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - com.google.cloud.spanner.SingerProto.SingerInfo parsedMessage = null; + if (extensionRegistry == null) { + throw new NullPointerException(); + } try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + singerId_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + birthDate_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + nationality_ = 
input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + genre_ = input.readEnum(); + bitField0_ |= 0x00000008; + break; + } // case 32 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.spanner.SingerProto.SingerInfo) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } + onChanged(); + } // finally return this; } @@ -852,7 +813,7 @@ public Builder mergeFrom( * * @return Whether the singerId field is set. */ - @java.lang.Override + @Override public boolean hasSingerId() { return ((bitField0_ & 0x00000001) != 0); } @@ -861,7 +822,7 @@ public boolean hasSingerId() { * * @return The singerId. */ - @java.lang.Override + @Override public long getSingerId() { return singerId_; } @@ -872,8 +833,9 @@ public long getSingerId() { * @return This builder for chaining. */ public Builder setSingerId(long value) { - bitField0_ |= 0x00000001; + singerId_ = value; + bitField0_ |= 0x00000001; onChanged(); return this; } @@ -889,7 +851,7 @@ public Builder clearSingerId() { return this; } - private java.lang.Object birthDate_ = ""; + private Object birthDate_ = ""; /** * optional string birth_date = 2; * @@ -903,17 +865,15 @@ public boolean hasBirthDate() { * * @return The birthDate. 
*/ - public java.lang.String getBirthDate() { - java.lang.Object ref = birthDate_; - if (!(ref instanceof java.lang.String)) { + public String getBirthDate() { + Object ref = birthDate_; + if (!(ref instanceof String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - birthDate_ = s; - } + String s = bs.toStringUtf8(); + birthDate_ = s; return s; } else { - return (java.lang.String) ref; + return (String) ref; } } /** @@ -922,10 +882,10 @@ public java.lang.String getBirthDate() { * @return The bytes for birthDate. */ public com.google.protobuf.ByteString getBirthDateBytes() { - java.lang.Object ref = birthDate_; + Object ref = birthDate_; if (ref instanceof String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + com.google.protobuf.ByteString.copyFromUtf8((String) ref); birthDate_ = b; return b; } else { @@ -938,12 +898,12 @@ public com.google.protobuf.ByteString getBirthDateBytes() { * @param value The birthDate to set. * @return This builder for chaining. */ - public Builder setBirthDate(java.lang.String value) { + public Builder setBirthDate(String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000002; birthDate_ = value; + bitField0_ |= 0x00000002; onChanged(); return this; } @@ -953,8 +913,8 @@ public Builder setBirthDate(java.lang.String value) { * @return This builder for chaining. 
*/ public Builder clearBirthDate() { - bitField0_ = (bitField0_ & ~0x00000002); birthDate_ = getDefaultInstance().getBirthDate(); + bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } @@ -968,13 +928,14 @@ public Builder setBirthDateBytes(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000002; + checkByteStringIsUtf8(value); birthDate_ = value; + bitField0_ |= 0x00000002; onChanged(); return this; } - private java.lang.Object nationality_ = ""; + private Object nationality_ = ""; /** * optional string nationality = 3; * @@ -988,17 +949,15 @@ public boolean hasNationality() { * * @return The nationality. */ - public java.lang.String getNationality() { - java.lang.Object ref = nationality_; - if (!(ref instanceof java.lang.String)) { + public String getNationality() { + Object ref = nationality_; + if (!(ref instanceof String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - nationality_ = s; - } + String s = bs.toStringUtf8(); + nationality_ = s; return s; } else { - return (java.lang.String) ref; + return (String) ref; } } /** @@ -1007,10 +966,10 @@ public java.lang.String getNationality() { * @return The bytes for nationality. */ public com.google.protobuf.ByteString getNationalityBytes() { - java.lang.Object ref = nationality_; + Object ref = nationality_; if (ref instanceof String) { com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + com.google.protobuf.ByteString.copyFromUtf8((String) ref); nationality_ = b; return b; } else { @@ -1023,12 +982,12 @@ public com.google.protobuf.ByteString getNationalityBytes() { * @param value The nationality to set. * @return This builder for chaining. 
*/ - public Builder setNationality(java.lang.String value) { + public Builder setNationality(String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; nationality_ = value; + bitField0_ |= 0x00000004; onChanged(); return this; } @@ -1038,8 +997,8 @@ public Builder setNationality(java.lang.String value) { * @return This builder for chaining. */ public Builder clearNationality() { - bitField0_ = (bitField0_ & ~0x00000004); nationality_ = getDefaultInstance().getNationality(); + bitField0_ = (bitField0_ & ~0x00000004); onChanged(); return this; } @@ -1053,41 +1012,61 @@ public Builder setNationalityBytes(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; + checkByteStringIsUtf8(value); nationality_ = value; + bitField0_ |= 0x00000004; onChanged(); return this; } private int genre_ = 0; /** - * optional .spanner.examples.music.Genre genre = 4; + * optional .examples.spanner.music.Genre genre = 4; * * @return Whether the genre field is set. */ - @java.lang.Override + @Override public boolean hasGenre() { return ((bitField0_ & 0x00000008) != 0); } /** - * optional .spanner.examples.music.Genre genre = 4; + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The enum numeric value on the wire for genre. + */ + @Override + public int getGenreValue() { + return genre_; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @param value The enum numeric value on the wire for genre to set. + * @return This builder for chaining. + */ + public Builder setGenreValue(int value) { + genre_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * optional .examples.spanner.music.Genre genre = 4; * * @return The genre. 
*/ - @java.lang.Override - public com.google.cloud.spanner.SingerProto.Genre getGenre() { - @SuppressWarnings("deprecation") - com.google.cloud.spanner.SingerProto.Genre result = - com.google.cloud.spanner.SingerProto.Genre.valueOf(genre_); - return result == null ? com.google.cloud.spanner.SingerProto.Genre.POP : result; + @Override + public Genre getGenre() { + Genre result = Genre.forNumber(genre_); + return result == null ? Genre.UNRECOGNIZED : result; } /** - * optional .spanner.examples.music.Genre genre = 4; + * optional .examples.spanner.music.Genre genre = 4; * * @param value The genre to set. * @return This builder for chaining. */ - public Builder setGenre(com.google.cloud.spanner.SingerProto.Genre value) { + public Builder setGenre(Genre value) { if (value == null) { throw new NullPointerException(); } @@ -1097,7 +1076,7 @@ public Builder setGenre(com.google.cloud.spanner.SingerProto.Genre value) { return this; } /** - * optional .spanner.examples.music.Genre genre = 4; + * optional .examples.spanner.music.Genre genre = 4; * * @return This builder for chaining. 
*/ @@ -1108,41 +1087,52 @@ public Builder clearGenre() { return this; } - @java.lang.Override + @Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } - @java.lang.Override + @Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } - // @@protoc_insertion_point(builder_scope:spanner.examples.music.SingerInfo) + // @@protoc_insertion_point(builder_scope:examples.spanner.music.SingerInfo) } - // @@protoc_insertion_point(class_scope:spanner.examples.music.SingerInfo) - private static final com.google.cloud.spanner.SingerProto.SingerInfo DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:examples.spanner.music.SingerInfo) + private static final SingerInfo DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new com.google.cloud.spanner.SingerProto.SingerInfo(); + DEFAULT_INSTANCE = new SingerInfo(); } - public static com.google.cloud.spanner.SingerProto.SingerInfo getDefaultInstance() { + public static SingerInfo getDefaultInstance() { return DEFAULT_INSTANCE; } - @java.lang.Deprecated - public static final com.google.protobuf.Parser PARSER = + private static final com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override + @Override public SingerInfo parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new SingerInfo(input, extensionRegistry); + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + 
.setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); } }; @@ -1150,21 +1140,21 @@ public static com.google.protobuf.Parser parser() { return PARSER; } - @java.lang.Override + @Override public com.google.protobuf.Parser getParserForType() { return PARSER; } - @java.lang.Override - public com.google.cloud.spanner.SingerProto.SingerInfo getDefaultInstanceForType() { + @Override + public SingerInfo getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private static final com.google.protobuf.Descriptors.Descriptor - internal_static_spanner_examples_music_SingerInfo_descriptor; + internal_static_examples_spanner_music_SingerInfo_descriptor; private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_spanner_examples_music_SingerInfo_fieldAccessorTable; + internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; @@ -1173,24 +1163,26 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { private static com.google.protobuf.Descriptors.FileDescriptor descriptor; static { - java.lang.String[] descriptorData = { - "\n\014singer.proto\022\026spanner.examples.music\"v" - + "\n\nSingerInfo\022\021\n\tsinger_id\030\001 \001(\003\022\022\n\nbirth" - + "_date\030\002 \001(\t\022\023\n\013nationality\030\003 \001(\t\022,\n\005genr" - + "e\030\004 \001(\0162\035.spanner.examples.music.Genre*." 
- + "\n\005Genre\022\007\n\003POP\020\000\022\010\n\004JAZZ\020\001\022\010\n\004FOLK\020\002\022\010\n\004" - + "ROCK\020\003B)\n\030com.google.cloud.spannerB\013Sing" - + "erProtoP\000" + String[] descriptorData = { + "\n\014singer.proto\022\026examples.spanner.music\"\301" + + "\001\n\nSingerInfo\022\026\n\tsinger_id\030\001 \001(\003H\000\210\001\001\022\027\n" + + "\nbirth_date\030\002 \001(\tH\001\210\001\001\022\030\n\013nationality\030\003 " + + "\001(\tH\002\210\001\001\0221\n\005genre\030\004 \001(\0162\035.examples.spann" + + "er.music.GenreH\003\210\001\001B\014\n\n_singer_idB\r\n\013_bi" + + "rth_dateB\016\n\014_nationalityB\010\n\006_genre*.\n\005Ge" + + "nre\022\007\n\003POP\020\000\022\010\n\004JAZZ\020\001\022\010\n\004FOLK\020\002\022\010\n\004ROCK" + + "\020\003B)\n\030com.google.cloud.spannerB\013SingerPr" + + "otoP\000b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); - internal_static_spanner_examples_music_SingerInfo_descriptor = + internal_static_examples_spanner_music_SingerInfo_descriptor = getDescriptor().getMessageTypes().get(0); - internal_static_spanner_examples_music_SingerInfo_fieldAccessorTable = + internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_spanner_examples_music_SingerInfo_descriptor, - new java.lang.String[] { + internal_static_examples_spanner_music_SingerInfo_descriptor, + new String[] { "SingerId", "BirthDate", "Nationality", "Genre", }); } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpanTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpanTest.java index fdfe4871680..ffe9e584de3 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpanTest.java +++ 
b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpanTest.java @@ -63,6 +63,7 @@ @Category(TracerTest.class) @RunWith(JUnit4.class) public class SpanTest { + private static final String TEST_PROJECT = "my-project"; private static final String TEST_INSTANCE = "my-instance"; private static final String TEST_DATABASE = "my-database"; @@ -506,12 +507,13 @@ public void transactionRunnerWithError() { Map spans = failOnOverkillTraceComponent.getSpans(); if (isMultiplexedSessionsEnabled()) { - assertEquals(spans.toString(), 4, spans.size()); + assertEquals(spans.toString(), 5, spans.size()); assertThat(spans).containsEntry("CloudSpannerOperation.CreateMultiplexedSession", true); } else { - assertThat(spans.size()).isEqualTo(3); + assertThat(spans.size()).isEqualTo(4); } assertThat(spans).containsEntry("CloudSpanner.ReadWriteTransaction", true); + assertThat(spans).containsEntry("CloudSpannerOperation.ExecuteUpdate", true); assertThat(spans).containsEntry("CloudSpannerOperation.BatchCreateSessions", true); assertThat(spans).containsEntry("CloudSpannerOperation.BatchCreateSessionsRequest", true); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerCloudMonitoringExporterTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerCloudMonitoringExporterTest.java new file mode 100644 index 00000000000..db245d3af81 --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerCloudMonitoringExporterTest.java @@ -0,0 +1,349 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.BuiltInMetricsConstant.CLIENT_HASH_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.CLIENT_NAME_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.CLIENT_UID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.DATABASE_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.DIRECT_PATH_ENABLED_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.DIRECT_PATH_USED_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.GAX_METER_NAME; +import static com.google.cloud.spanner.BuiltInMetricsConstant.INSTANCE_CONFIG_ID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.INSTANCE_ID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.LOCATION_ID_KEY; +import static com.google.cloud.spanner.BuiltInMetricsConstant.OPERATION_COUNT_NAME; +import static com.google.cloud.spanner.BuiltInMetricsConstant.OPERATION_LATENCIES_NAME; +import static com.google.cloud.spanner.BuiltInMetricsConstant.PROJECT_ID_KEY; +import static com.google.common.truth.Truth.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.api.Distribution; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.monitoring.v3.MetricServiceClient; +import com.google.cloud.monitoring.v3.stub.MetricServiceStub; +import 
com.google.common.collect.ImmutableList; +import com.google.monitoring.v3.CreateTimeSeriesRequest; +import com.google.monitoring.v3.TimeSeries; +import com.google.protobuf.Empty; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.common.InstrumentationScopeInfo; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramPointData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableLongPointData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableSumData; +import io.opentelemetry.sdk.resources.Resource; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; + +public class SpannerCloudMonitoringExporterTest { + + private static final String projectId = "fake-project"; + private static final String instanceId = "fake-instance"; + private static final String locationId = "global"; + private static final String databaseId = "fake-database"; + private static final String clientName = "spanner-java"; + + private static final String clientHash = "spanner-test"; + private static final String instanceConfigId = "fake-instance-config-id"; + + @Rule public final MockitoRule mockitoRule = MockitoJUnit.rule(); + + @Mock private MetricServiceStub mockMetricServiceStub; + 
private MetricServiceClient fakeMetricServiceClient; + private SpannerCloudMonitoringExporter exporter; + + private Attributes attributes; + private Resource resource; + private InstrumentationScopeInfo scope; + + @Before + public void setUp() { + fakeMetricServiceClient = new FakeMetricServiceClient(mockMetricServiceStub); + exporter = new SpannerCloudMonitoringExporter(projectId, fakeMetricServiceClient); + + attributes = + Attributes.builder() + .put(PROJECT_ID_KEY, projectId) + .put(INSTANCE_ID_KEY, instanceId) + .put(LOCATION_ID_KEY, locationId) + .put(INSTANCE_CONFIG_ID_KEY, instanceConfigId) + .put(DATABASE_KEY, databaseId) + .put(CLIENT_NAME_KEY, clientName) + .put(CLIENT_HASH_KEY, clientHash) + .put(String.valueOf(DIRECT_PATH_ENABLED_KEY), true) + .put(String.valueOf(DIRECT_PATH_USED_KEY), true) + .build(); + + resource = Resource.create(Attributes.empty()); + + scope = InstrumentationScopeInfo.create(GAX_METER_NAME); + } + + @After + public void tearDown() {} + + @Test + public void testExportingSumData() { + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = Mockito.mock(UnaryCallable.class); + Mockito.when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + Mockito.when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long fakeValue = 11L; + + long startEpoch = 10; + long endEpoch = 15; + LongPointData longPointData = + ImmutableLongPointData.create(startEpoch, endEpoch, attributes, fakeValue); + + MetricData longData = + ImmutableMetricData.createLongSum( + resource, + scope, + "spanner.googleapis.com/internal/client/" + OPERATION_COUNT_NAME, + "description", + "1", + ImmutableSumData.create( + true, AggregationTemporality.CUMULATIVE, ImmutableList.of(longPointData))); + + exporter.export(Arrays.asList(longData)); + + CreateTimeSeriesRequest 
request = argumentCaptor.getValue(); + + assertThat(request.getTimeSeriesList()).hasSize(1); + + TimeSeries timeSeries = request.getTimeSeriesList().get(0); + + assertThat(timeSeries.getResource().getLabelsMap()) + .containsExactly( + PROJECT_ID_KEY.getKey(), projectId, + INSTANCE_ID_KEY.getKey(), instanceId, + LOCATION_ID_KEY.getKey(), locationId, + INSTANCE_CONFIG_ID_KEY.getKey(), instanceConfigId, + CLIENT_HASH_KEY.getKey(), clientHash); + + assertThat(timeSeries.getResource().getLabelsMap()).hasSize(5); + + assertThat(timeSeries.getMetric().getLabelsMap()) + .containsExactly( + DATABASE_KEY.getKey(), + databaseId, + CLIENT_NAME_KEY.getKey(), + clientName, + DIRECT_PATH_ENABLED_KEY.getKey(), + "true", + DIRECT_PATH_USED_KEY.getKey(), + "true"); + assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(4); + + assertThat(timeSeries.getPoints(0).getValue().getInt64Value()).isEqualTo(fakeValue); + assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos()) + .isEqualTo(startEpoch); + assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch); + } + + @Test + public void testExportingHistogramData() { + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = mock(UnaryCallable.class); + when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long startEpoch = 10; + long endEpoch = 15; + HistogramPointData histogramPointData = + ImmutableHistogramPointData.create( + startEpoch, + endEpoch, + attributes, + 3d, + true, + 1d, // min + true, + 2d, // max + Arrays.asList(1.0), + Arrays.asList(1L, 2L)); + + MetricData histogramData = + ImmutableMetricData.createDoubleHistogram( + resource, + scope, + "spanner.googleapis.com/internal/client/" + 
OPERATION_LATENCIES_NAME, + "description", + "ms", + ImmutableHistogramData.create( + AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData))); + + exporter.export(Arrays.asList(histogramData)); + + CreateTimeSeriesRequest request = argumentCaptor.getValue(); + + assertThat(request.getTimeSeriesList()).hasSize(1); + + TimeSeries timeSeries = request.getTimeSeriesList().get(0); + + assertThat(timeSeries.getResource().getLabelsMap()).hasSize(5); + assertThat(timeSeries.getResource().getLabelsMap()) + .containsExactly( + PROJECT_ID_KEY.getKey(), projectId, + INSTANCE_ID_KEY.getKey(), instanceId, + LOCATION_ID_KEY.getKey(), locationId, + INSTANCE_CONFIG_ID_KEY.getKey(), instanceConfigId, + CLIENT_HASH_KEY.getKey(), clientHash); + + assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(4); + assertThat(timeSeries.getMetric().getLabelsMap()) + .containsExactly( + DATABASE_KEY.getKey(), + databaseId, + CLIENT_NAME_KEY.getKey(), + clientName, + DIRECT_PATH_ENABLED_KEY.getKey(), + "true", + DIRECT_PATH_USED_KEY.getKey(), + "true"); + + Distribution distribution = timeSeries.getPoints(0).getValue().getDistributionValue(); + assertThat(distribution.getCount()).isEqualTo(3); + assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos()) + .isEqualTo(startEpoch); + assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch); + } + + @Test + public void testExportingSumDataInBatches() { + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = mock(UnaryCallable.class); + when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long startEpoch = 10; + long endEpoch = 15; + + Collection toExport = new ArrayList<>(); + for (int i = 0; i < 250; i++) { + 
LongPointData longPointData = + ImmutableLongPointData.create( + startEpoch, + endEpoch, + attributes.toBuilder().put(CLIENT_UID_KEY, "client_uid" + i).build(), + i); + + MetricData longData = + ImmutableMetricData.createLongSum( + resource, + scope, + "spanner.googleapis.com/internal/client/" + OPERATION_COUNT_NAME, + "description", + "1", + ImmutableSumData.create( + true, AggregationTemporality.CUMULATIVE, ImmutableList.of(longPointData))); + toExport.add(longData); + } + + exporter.export(toExport); + + assertThat(argumentCaptor.getAllValues()).hasSize(2); + CreateTimeSeriesRequest firstRequest = argumentCaptor.getAllValues().get(0); + CreateTimeSeriesRequest secondRequest = argumentCaptor.getAllValues().get(1); + + assertThat(firstRequest.getTimeSeriesList()).hasSize(200); + assertThat(secondRequest.getTimeSeriesList()).hasSize(50); + + for (int i = 0; i < 250; i++) { + TimeSeries timeSeries; + if (i < 200) { + timeSeries = firstRequest.getTimeSeriesList().get(i); + } else { + timeSeries = secondRequest.getTimeSeriesList().get(i - 200); + } + + assertThat(timeSeries.getResource().getLabelsMap()).hasSize(5); + assertThat(timeSeries.getResource().getLabelsMap()) + .containsExactly( + PROJECT_ID_KEY.getKey(), projectId, + INSTANCE_ID_KEY.getKey(), instanceId, + LOCATION_ID_KEY.getKey(), locationId, + INSTANCE_CONFIG_ID_KEY.getKey(), instanceConfigId, + CLIENT_HASH_KEY.getKey(), clientHash); + + assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5); + assertThat(timeSeries.getMetric().getLabelsMap()) + .containsExactly( + DATABASE_KEY.getKey(), + databaseId, + CLIENT_NAME_KEY.getKey(), + clientName, + DIRECT_PATH_ENABLED_KEY.getKey(), + "true", + DIRECT_PATH_USED_KEY.getKey(), + "true", + CLIENT_UID_KEY.getKey(), + "client_uid" + i); + + assertThat(timeSeries.getPoints(0).getValue().getInt64Value()).isEqualTo(i); + assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos()) + .isEqualTo(startEpoch); + 
assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch); + } + } + + @Test + public void getAggregationTemporality() throws IOException { + SpannerCloudMonitoringExporter actualExporter = + SpannerCloudMonitoringExporter.create(projectId, null); + assertThat(actualExporter.getAggregationTemporality(InstrumentType.COUNTER)) + .isEqualTo(AggregationTemporality.CUMULATIVE); + } + + private static class FakeMetricServiceClient extends MetricServiceClient { + + protected FakeMetricServiceClient(MetricServiceStub stub) { + super(stub); + } + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsHelper.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsHelper.java new file mode 100644 index 00000000000..db02c625099 --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsHelper.java @@ -0,0 +1,29 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +/** Helper to configure SpannerOptions for tests. */ +public class SpannerOptionsHelper { + + /** + * Resets the activeTracingFramework. This variable is used for internal testing, and is not a + * valid production scenario. 
+ */ + public static void resetActiveTracingFramework() { + SpannerOptions.resetActiveTracingFramework(); + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsTest.java index 522cd0147a3..e8421cd235c 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsTest.java @@ -104,16 +104,21 @@ public static void resetLogging() { @Test public void defaultBuilder() { - // We need to set the project id since in test environment we cannot obtain a default project - // id. - SpannerOptions options = SpannerOptions.newBuilder().setProjectId("test-project").build(); + // We need to set the project id and credentials since in test environments we cannot guarantee + // that a default project id and credentials are available. + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("test-project") + .setCredentials(NoCredentials.getInstance()) + .build(); if (Strings.isNullOrEmpty(System.getenv("SPANNER_EMULATOR_HOST"))) { - assertThat(options.getHost()).isEqualTo("https://spanner.googleapis.com"); + assertEquals("https://spanner.googleapis.com", options.getHost()); } else { - assertThat(options.getHost()).isEqualTo("http://" + System.getenv("SPANNER_EMULATOR_HOST")); + assertEquals("http://" + System.getenv("SPANNER_EMULATOR_HOST"), options.getHost()); } - assertThat(options.getPrefetchChunks()).isEqualTo(4); + assertEquals(4, options.getPrefetchChunks()); assertNull(options.getSessionLabels()); + assertEquals(DecodeMode.DIRECT, options.getDecodeMode()); } @Test @@ -731,6 +736,24 @@ public void testLeaderAwareRoutingEnablement() { .isLeaderAwareRoutingEnabled()); } + @Test + public void testEndToEndTracingEnablement() { + // Test that end to end tracing is disabled by default. 
+ assertFalse(SpannerOptions.newBuilder().setProjectId("p").build().isEndToEndTracingEnabled()); + assertTrue( + SpannerOptions.newBuilder() + .setProjectId("p") + .setEnableEndToEndTracing(true) + .build() + .isEndToEndTracingEnabled()); + assertFalse( + SpannerOptions.newBuilder() + .setProjectId("p") + .setEnableEndToEndTracing(false) + .build() + .isEndToEndTracingEnabled()); + } + @Test public void testSetDirectedReadOptions() { final DirectedReadOptions directedReadOptions = diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsTestHelper.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsTestHelper.java new file mode 100644 index 00000000000..8f8c0a30a8c --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerOptionsTestHelper.java @@ -0,0 +1,23 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.spanner; + +public class SpannerOptionsTestHelper { + + public static void resetActiveTracingFramework() { + SpannerOptions.resetActiveTracingFramework(); + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionContextImplTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionContextImplTest.java index 53120eda441..561bfb89008 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionContextImplTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionContextImplTest.java @@ -17,6 +17,7 @@ package com.google.cloud.spanner; import static org.junit.Assert.assertThrows; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyMap; import static org.mockito.Mockito.doNothing; @@ -27,6 +28,7 @@ import com.google.api.core.ApiFutures; import com.google.cloud.spanner.TransactionRunnerImpl.TransactionContextImpl; import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; import com.google.protobuf.ByteString; import com.google.protobuf.Timestamp; import com.google.rpc.Code; @@ -34,6 +36,7 @@ import com.google.spanner.v1.CommitRequest; import com.google.spanner.v1.ExecuteBatchDmlRequest; import com.google.spanner.v1.ExecuteBatchDmlResponse; +import io.opentelemetry.api.common.Attributes; import java.util.Collections; import org.junit.Before; import org.junit.Test; @@ -67,7 +70,19 @@ public void setup() { doNothing().when(span).setStatus(any(Throwable.class)); doNothing().when(span).end(); doNothing().when(span).addAnnotation("Starting Commit"); + when(tracer.createStatementAttributes(any(Statement.class), any())) + .thenReturn(Attributes.empty()); + when(tracer.createStatementBatchAttributes(any(Iterable.class), any())) + .thenReturn(Attributes.empty()); 
when(tracer.spanBuilderWithExplicitParent(SpannerImpl.COMMIT, span)).thenReturn(span); + when(tracer.spanBuilderWithExplicitParent( + eq(SpannerImpl.COMMIT), eq(span), any(Attributes.class))) + .thenReturn(span); + when(tracer.spanBuilderWithExplicitParent( + eq(SpannerImpl.BATCH_UPDATE), eq(span), any(Attributes.class))) + .thenReturn(span); + when(rpc.getCommitRetrySettings()) + .thenReturn(SpannerStubSettings.newBuilder().commitSettings().getRetrySettings()); } private TransactionContextImpl createContext() { @@ -187,7 +202,7 @@ public void testReturnCommitStats() { .setSession(session.getName()) .setTransactionId(transactionId) .build(); - verify(rpc).commitAsync(Mockito.eq(request), anyMap()); + verify(rpc).commitAsync(eq(request), anyMap()); } } @@ -210,6 +225,8 @@ private void batchDml(int status) { .setRpc(rpc) .setTransactionId(ByteString.copyFromUtf8("test")) .setOptions(Options.fromTransactionOptions()) + .setTracer(tracer) + .setSpan(span) .build()) { impl.batchUpdate(Collections.singletonList(statement)); } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionManagerImplTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionManagerImplTest.java index dc28b333c4f..c3fcf1c7480 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionManagerImplTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionManagerImplTest.java @@ -35,6 +35,7 @@ import com.google.cloud.grpc.GrpcTransportOptions.ExecutorFactory; import com.google.cloud.spanner.TransactionManager.TransactionState; import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; import com.google.protobuf.ByteString; import com.google.protobuf.Empty; import com.google.spanner.v1.BeginTransactionRequest; @@ -248,6 +249,8 @@ public void usesPreparedTransaction() { com.google.protobuf.Timestamp.newBuilder() 
.setSeconds(System.currentTimeMillis() * 1000)) .build())); + when(rpc.getCommitRetrySettings()) + .thenReturn(SpannerStubSettings.newBuilder().commitSettings().getRetrySettings()); DatabaseId db = DatabaseId.of("test", "test", "test"); try (SpannerImpl spanner = new SpannerImpl(rpc, options)) { DatabaseClient client = spanner.getDatabaseClient(db); @@ -332,6 +335,8 @@ public void inlineBegin() { com.google.protobuf.Timestamp.newBuilder() .setSeconds(System.currentTimeMillis() * 1000)) .build())); + when(rpc.getCommitRetrySettings()) + .thenReturn(SpannerStubSettings.newBuilder().commitSettings().getRetrySettings()); DatabaseId db = DatabaseId.of("test", "test", "test"); try (SpannerImpl spanner = new SpannerImpl(rpc, options)) { DatabaseClient client = spanner.getDatabaseClient(db); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionRunnerImplTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionRunnerImplTest.java index d30df09d4c3..c647bb3642a 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionRunnerImplTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionRunnerImplTest.java @@ -32,9 +32,11 @@ import com.google.api.core.ApiFutures; import com.google.cloud.grpc.GrpcTransportOptions; import com.google.cloud.grpc.GrpcTransportOptions.ExecutorFactory; +import com.google.cloud.spanner.ErrorHandler.DefaultErrorHandler; import com.google.cloud.spanner.SessionClient.SessionId; import com.google.cloud.spanner.TransactionRunnerImpl.TransactionContextImpl; import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.v1.stub.SpannerStubSettings; import com.google.common.base.Preconditions; import com.google.protobuf.ByteString; import com.google.protobuf.Duration; @@ -112,8 +114,9 @@ public static void setupOpenTelemetry() { @Before public void setUp() { MockitoAnnotations.initMocks(this); - tracer = new 
TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer("")); + tracer = new TraceWrapper(Tracing.getTracer(), OpenTelemetry.noop().getTracer(""), false); firstRun = true; + when(session.getErrorHandler()).thenReturn(DefaultErrorHandler.INSTANCE); when(session.newTransaction(Options.fromTransactionOptions())).thenReturn(txn); when(session.getTracer()).thenReturn(tracer); when(rpc.executeQuery(Mockito.any(ExecuteSqlRequest.class), Mockito.anyMap(), eq(true))) @@ -141,6 +144,8 @@ public void setUp() { CommitResponse.newBuilder() .setCommitTimestamp(Timestamp.getDefaultInstance()) .build())); + when(rpc.getCommitRetrySettings()) + .thenReturn(SpannerStubSettings.newBuilder().commitSettings().getRetrySettings()); when(rpc.rollbackAsync(Mockito.any(RollbackRequest.class), Mockito.anyMap())) .thenReturn(ApiFutures.immediateFuture(Empty.getDefaultInstance())); Span oTspan = mock(Span.class); @@ -196,6 +201,8 @@ public void usesPreparedTransaction() { .setCommitTimestamp( Timestamp.newBuilder().setSeconds(System.currentTimeMillis() * 1000)) .build())); + when(rpc.getCommitRetrySettings()) + .thenReturn(SpannerStubSettings.newBuilder().commitSettings().getRetrySettings()); DatabaseId db = DatabaseId.of("test", "test", "test"); try (SpannerImpl spanner = new SpannerImpl(rpc, options)) { DatabaseClient client = spanner.getDatabaseClient(db); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientHttpJsonTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientHttpJsonTest.java index 1fe968db228..b5a045b24a0 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientHttpJsonTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientHttpJsonTest.java @@ -17,6 +17,7 @@ package com.google.cloud.spanner.admin.database.v1; import static 
com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupSchedulesPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPagedResponse; @@ -41,11 +42,16 @@ import com.google.longrunning.Operation; import com.google.protobuf.Any; import com.google.protobuf.ByteString; +import com.google.protobuf.Duration; import com.google.protobuf.Empty; import com.google.protobuf.FieldMask; import com.google.protobuf.Timestamp; import com.google.spanner.admin.database.v1.Backup; import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.BackupScheduleName; +import com.google.spanner.admin.database.v1.BackupScheduleSpec; +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig; import com.google.spanner.admin.database.v1.Database; import com.google.spanner.admin.database.v1.DatabaseDialect; import com.google.spanner.admin.database.v1.DatabaseName; @@ -55,6 +61,7 @@ import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.InstanceName; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; import com.google.spanner.admin.database.v1.ListBackupsResponse; import com.google.spanner.admin.database.v1.ListDatabaseOperationsResponse; import com.google.spanner.admin.database.v1.ListDatabaseRolesResponse; @@ -1079,11 +1086,17 @@ public void createBackupTest() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", 
"[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() @@ -1142,11 +1155,17 @@ public void createBackupTest2() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() @@ -1205,11 +1224,17 @@ public void copyBackupTest() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) 
.setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() @@ -1271,11 +1296,17 @@ public void copyBackupTest2() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() @@ -1337,11 +1368,17 @@ public void copyBackupTest3() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = 
Operation.newBuilder() @@ -1403,11 +1440,17 @@ public void copyBackupTest4() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() @@ -1469,11 +1512,17 @@ public void getBackupTest() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); mockService.addResponse(expectedResponse); @@ -1523,11 +1572,17 @@ public void getBackupTest2() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) 
.setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); mockService.addResponse(expectedResponse); @@ -1577,11 +1632,17 @@ public void updateBackupTest() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); mockService.addResponse(expectedResponse); @@ -1593,11 +1654,17 @@ public void updateBackupTest() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + 
.setOldestVersionTime(Timestamp.newBuilder().build()) .build(); FieldMask updateMask = FieldMask.newBuilder().build(); @@ -1635,11 +1702,17 @@ public void updateBackupExceptionTest() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); FieldMask updateMask = FieldMask.newBuilder().build(); client.updateBackup(backup, updateMask); @@ -2380,4 +2453,472 @@ public void listDatabaseRolesExceptionTest2() throws Exception { // Expected exception. 
} } + + @Test + public void createBackupScheduleTest() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + String backupScheduleId = "backupScheduleId1704974708"; + + BackupSchedule actualResponse = + client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createBackupScheduleExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + String backupScheduleId = "backupScheduleId1704974708"; + client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void createBackupScheduleTest2() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-9347/instances/instance-9347/databases/database-9347"; + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + String backupScheduleId = "backupScheduleId1704974708"; + + BackupSchedule actualResponse = + client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void createBackupScheduleExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-9347/instances/instance-9347/databases/database-9347"; + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + String backupScheduleId = "backupScheduleId1704974708"; + client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getBackupScheduleTest() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + + BackupSchedule actualResponse = client.getBackupSchedule(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getBackupScheduleExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + client.getBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getBackupScheduleTest2() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-8764/instances/instance-8764/databases/database-8764/backupSchedules/backupSchedule-8764"; + + BackupSchedule actualResponse = client.getBackupSchedule(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void getBackupScheduleExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-8764/instances/instance-8764/databases/database-8764/backupSchedules/backupSchedule-8764"; + client.getBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateBackupScheduleTest() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockService.addResponse(expectedResponse); + + BackupSchedule backupSchedule = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + BackupSchedule actualResponse = client.updateBackupSchedule(backupSchedule, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void updateBackupScheduleExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + BackupSchedule backupSchedule = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + 
.setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateBackupSchedule(backupSchedule, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void deleteBackupScheduleTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + + client.deleteBackupSchedule(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteBackupScheduleExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + client.deleteBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteBackupScheduleTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockService.addResponse(expectedResponse); + + String name = + "projects/project-8764/instances/instance-8764/databases/database-8764/backupSchedules/backupSchedule-8764"; + + client.deleteBackupSchedule(name); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void deleteBackupScheduleExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String name = + "projects/project-8764/instances/instance-8764/databases/database-8764/backupSchedules/backupSchedule-8764"; + client.deleteBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBackupSchedulesTest() throws Exception { + BackupSchedule responsesElement = BackupSchedule.newBuilder().build(); + ListBackupSchedulesResponse expectedResponse = + ListBackupSchedulesResponse.newBuilder() + .setNextPageToken("") + .addAllBackupSchedules(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + ListBackupSchedulesPagedResponse pagedListResponse = client.listBackupSchedules(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBackupSchedulesList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listBackupSchedulesExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.listBackupSchedules(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBackupSchedulesTest2() throws Exception { + BackupSchedule responsesElement = BackupSchedule.newBuilder().build(); + ListBackupSchedulesResponse expectedResponse = + ListBackupSchedulesResponse.newBuilder() + .setNextPageToken("") + .addAllBackupSchedules(Arrays.asList(responsesElement)) + .build(); + mockService.addResponse(expectedResponse); + + String parent = "projects/project-9347/instances/instance-9347/databases/database-9347"; + + ListBackupSchedulesPagedResponse pagedListResponse = client.listBackupSchedules(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBackupSchedulesList().get(0), resources.get(0)); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void listBackupSchedulesExceptionTest2() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + String parent = "projects/project-9347/instances/instance-9347/databases/database-9347"; + client.listBackupSchedules(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientTest.java index b8f56295789..a4de864ee6f 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientTest.java @@ -17,6 +17,7 @@ package com.google.cloud.spanner.admin.database.v1; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupOperationsPagedResponse; +import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupSchedulesPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListBackupsPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseOperationsPagedResponse; import static com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient.ListDatabaseRolesPagedResponse; @@ -43,29 +44,39 @@ import com.google.protobuf.AbstractMessage; import com.google.protobuf.Any; import com.google.protobuf.ByteString; +import com.google.protobuf.Duration; import com.google.protobuf.Empty; import com.google.protobuf.FieldMask; import com.google.protobuf.Timestamp; import com.google.spanner.admin.database.v1.Backup; import com.google.spanner.admin.database.v1.BackupName; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.BackupScheduleName; +import com.google.spanner.admin.database.v1.BackupScheduleSpec; import com.google.spanner.admin.database.v1.CopyBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig; import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; import 
com.google.spanner.admin.database.v1.CreateDatabaseRequest; import com.google.spanner.admin.database.v1.Database; import com.google.spanner.admin.database.v1.DatabaseDialect; import com.google.spanner.admin.database.v1.DatabaseName; import com.google.spanner.admin.database.v1.DatabaseRole; import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; import com.google.spanner.admin.database.v1.DropDatabaseRequest; import com.google.spanner.admin.database.v1.EncryptionConfig; import com.google.spanner.admin.database.v1.EncryptionInfo; import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.GetDatabaseRequest; import com.google.spanner.admin.database.v1.InstanceName; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; import com.google.spanner.admin.database.v1.ListBackupsRequest; import com.google.spanner.admin.database.v1.ListBackupsResponse; import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; @@ -77,6 +88,7 @@ import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; import com.google.spanner.admin.database.v1.RestoreInfo; import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; import com.google.spanner.admin.database.v1.UpdateDatabaseRequest; import io.grpc.StatusRuntimeException; @@ -987,11 +999,17 @@ public void 
createBackupTest() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() @@ -1049,11 +1067,17 @@ public void createBackupTest2() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() @@ -1111,11 +1135,17 @@ public void copyBackupTest() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + 
.addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() @@ -1177,11 +1207,17 @@ public void copyBackupTest2() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() @@ -1243,11 +1279,17 @@ public void copyBackupTest3() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); 
Operation resultOperation = Operation.newBuilder() @@ -1309,11 +1351,17 @@ public void copyBackupTest4() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() @@ -1375,11 +1423,17 @@ public void getBackupTest() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); mockDatabaseAdmin.addResponse(expectedResponse); @@ -1423,11 +1477,17 @@ public void getBackupTest2() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) 
.addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); mockDatabaseAdmin.addResponse(expectedResponse); @@ -1471,11 +1531,17 @@ public void updateBackupTest() throws Exception { .setName(BackupName.of("[PROJECT]", "[INSTANCE]", "[BACKUP]").toString()) .setCreateTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) + .setFreeableSizeBytes(1302251206) + .setExclusiveSizeBytes(-1085921554) .addAllReferencingDatabases(new ArrayList()) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .addAllEncryptionInformation(new ArrayList()) .setDatabaseDialect(DatabaseDialect.forNumber(0)) .addAllReferencingBackups(new ArrayList()) .setMaxExpireTime(Timestamp.newBuilder().build()) + .addAllBackupSchedules(new ArrayList()) + .setIncrementalBackupChainId("incrementalBackupChainId1926005216") + .setOldestVersionTime(Timestamp.newBuilder().build()) .build(); mockDatabaseAdmin.addResponse(expectedResponse); @@ -2183,4 +2249,406 @@ public void listDatabaseRolesExceptionTest2() throws Exception { // Expected exception. 
} } + + @Test + public void createBackupScheduleTest() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + String backupScheduleId = "backupScheduleId1704974708"; + + BackupSchedule actualResponse = + client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateBackupScheduleRequest actualRequest = + ((CreateBackupScheduleRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertEquals(backupSchedule, actualRequest.getBackupSchedule()); + Assert.assertEquals(backupScheduleId, actualRequest.getBackupScheduleId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createBackupScheduleExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + String backupScheduleId = "backupScheduleId1704974708"; + 
client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void createBackupScheduleTest2() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String parent = "parent-995424086"; + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + String backupScheduleId = "backupScheduleId1704974708"; + + BackupSchedule actualResponse = + client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateBackupScheduleRequest actualRequest = + ((CreateBackupScheduleRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(backupSchedule, actualRequest.getBackupSchedule()); + Assert.assertEquals(backupScheduleId, actualRequest.getBackupScheduleId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void createBackupScheduleExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + 
String backupScheduleId = "backupScheduleId1704974708"; + client.createBackupSchedule(parent, backupSchedule, backupScheduleId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. + } + } + + @Test + public void getBackupScheduleTest() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + + BackupSchedule actualResponse = client.getBackupSchedule(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetBackupScheduleRequest actualRequest = ((GetBackupScheduleRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getBackupScheduleExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + client.getBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void getBackupScheduleTest2() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String name = "name3373707"; + + BackupSchedule actualResponse = client.getBackupSchedule(name); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + GetBackupScheduleRequest actualRequest = ((GetBackupScheduleRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void getBackupScheduleExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String name = "name3373707"; + client.getBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void updateBackupScheduleTest() throws Exception { + BackupSchedule expectedResponse = + BackupSchedule.newBuilder() + .setName( + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]") + .toString()) + .setSpec(BackupScheduleSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().build()) + .setEncryptionConfig(CreateBackupEncryptionConfig.newBuilder().build()) + .setUpdateTime(Timestamp.newBuilder().build()) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + + BackupSchedule actualResponse = client.updateBackupSchedule(backupSchedule, updateMask); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + UpdateBackupScheduleRequest actualRequest = + ((UpdateBackupScheduleRequest) actualRequests.get(0)); + + Assert.assertEquals(backupSchedule, actualRequest.getBackupSchedule()); + Assert.assertEquals(updateMask, actualRequest.getUpdateMask()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void updateBackupScheduleExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + BackupSchedule backupSchedule = BackupSchedule.newBuilder().build(); + FieldMask updateMask = FieldMask.newBuilder().build(); + client.updateBackupSchedule(backupSchedule, updateMask); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteBackupScheduleTest() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + + client.deleteBackupSchedule(name); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteBackupScheduleRequest actualRequest = + ((DeleteBackupScheduleRequest) actualRequests.get(0)); + + Assert.assertEquals(name.toString(), actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteBackupScheduleExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + BackupScheduleName name = + BackupScheduleName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SCHEDULE]"); + client.deleteBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void deleteBackupScheduleTest2() throws Exception { + Empty expectedResponse = Empty.newBuilder().build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String name = "name3373707"; + + client.deleteBackupSchedule(name); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + DeleteBackupScheduleRequest actualRequest = + ((DeleteBackupScheduleRequest) actualRequests.get(0)); + + Assert.assertEquals(name, actualRequest.getName()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void deleteBackupScheduleExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String name = "name3373707"; + client.deleteBackupSchedule(name); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBackupSchedulesTest() throws Exception { + BackupSchedule responsesElement = BackupSchedule.newBuilder().build(); + ListBackupSchedulesResponse expectedResponse = + ListBackupSchedulesResponse.newBuilder() + .setNextPageToken("") + .addAllBackupSchedules(Arrays.asList(responsesElement)) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + + ListBackupSchedulesPagedResponse pagedListResponse = client.listBackupSchedules(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBackupSchedulesList().get(0), resources.get(0)); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListBackupSchedulesRequest actualRequest = ((ListBackupSchedulesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent.toString(), actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listBackupSchedulesExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + DatabaseName parent = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + client.listBackupSchedules(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void listBackupSchedulesTest2() throws Exception { + BackupSchedule responsesElement = BackupSchedule.newBuilder().build(); + ListBackupSchedulesResponse expectedResponse = + ListBackupSchedulesResponse.newBuilder() + .setNextPageToken("") + .addAllBackupSchedules(Arrays.asList(responsesElement)) + .build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String parent = "parent-995424086"; + + ListBackupSchedulesPagedResponse pagedListResponse = client.listBackupSchedules(parent); + + List resources = Lists.newArrayList(pagedListResponse.iterateAll()); + + Assert.assertEquals(1, resources.size()); + Assert.assertEquals(expectedResponse.getBackupSchedulesList().get(0), resources.get(0)); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + ListBackupSchedulesRequest actualRequest = ((ListBackupSchedulesRequest) actualRequests.get(0)); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void listBackupSchedulesExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String parent = "parent-995424086"; + client.listBackupSchedules(parent); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/MockDatabaseAdminImpl.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/MockDatabaseAdminImpl.java index f77ad2edbc8..9e273ed1550 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/MockDatabaseAdminImpl.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/MockDatabaseAdminImpl.java @@ -26,19 +26,25 @@ import com.google.protobuf.AbstractMessage; import com.google.protobuf.Empty; import com.google.spanner.admin.database.v1.Backup; +import com.google.spanner.admin.database.v1.BackupSchedule; import com.google.spanner.admin.database.v1.CopyBackupRequest; import com.google.spanner.admin.database.v1.CreateBackupRequest; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; import com.google.spanner.admin.database.v1.CreateDatabaseRequest; import com.google.spanner.admin.database.v1.Database; import com.google.spanner.admin.database.v1.DatabaseAdminGrpc.DatabaseAdminImplBase; import com.google.spanner.admin.database.v1.DeleteBackupRequest; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; import com.google.spanner.admin.database.v1.DropDatabaseRequest; import com.google.spanner.admin.database.v1.GetBackupRequest; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.GetDatabaseRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; +import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; +import com.google.spanner.admin.database.v1.ListBackupSchedulesResponse; import 
com.google.spanner.admin.database.v1.ListBackupsRequest; import com.google.spanner.admin.database.v1.ListBackupsResponse; import com.google.spanner.admin.database.v1.ListDatabaseOperationsRequest; @@ -49,6 +55,7 @@ import com.google.spanner.admin.database.v1.ListDatabasesResponse; import com.google.spanner.admin.database.v1.RestoreDatabaseRequest; import com.google.spanner.admin.database.v1.UpdateBackupRequest; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; import com.google.spanner.admin.database.v1.UpdateDatabaseRequest; import io.grpc.stub.StreamObserver; @@ -505,4 +512,110 @@ public void listDatabaseRoles( Exception.class.getName()))); } } + + @Override + public void createBackupSchedule( + CreateBackupScheduleRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof BackupSchedule) { + requests.add(request); + responseObserver.onNext(((BackupSchedule) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method CreateBackupSchedule, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + BackupSchedule.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void getBackupSchedule( + GetBackupScheduleRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof BackupSchedule) { + requests.add(request); + responseObserver.onNext(((BackupSchedule) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method GetBackupSchedule, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + BackupSchedule.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void updateBackupSchedule( + UpdateBackupScheduleRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof BackupSchedule) { + requests.add(request); + responseObserver.onNext(((BackupSchedule) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method UpdateBackupSchedule, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + BackupSchedule.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void deleteBackupSchedule( + DeleteBackupScheduleRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Empty) { + requests.add(request); + responseObserver.onNext(((Empty) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method DeleteBackupSchedule, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Empty.class.getName(), + Exception.class.getName()))); + } + } + + @Override + public void listBackupSchedules( + ListBackupSchedulesRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof ListBackupSchedulesResponse) { + requests.add(request); + responseObserver.onNext(((ListBackupSchedulesResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method ListBackupSchedules, expected %s or %s", + response == null ? 
"null" : response.getClass().getName(), + ListBackupSchedulesResponse.class.getName(), + Exception.class.getName()))); + } + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClientHttpJsonTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClientHttpJsonTest.java index 99d8138ad93..50532826e53 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClientHttpJsonTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClientHttpJsonTest.java @@ -56,7 +56,10 @@ import com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse; import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.MoveInstanceResponse; import com.google.spanner.admin.instance.v1.ProjectName; +import com.google.spanner.admin.instance.v1.ReplicaComputeCapacity; import com.google.spanner.admin.instance.v1.ReplicaInfo; import java.io.IOException; import java.util.ArrayList; @@ -908,6 +911,7 @@ public void getInstanceTest() throws Exception { .setDisplayName("displayName1714148973") .setNodeCount(1539922066) .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) .putAllLabels(new HashMap()) .addAllEndpointUris(new ArrayList()) @@ -961,6 +965,7 @@ public void getInstanceTest2() throws Exception { .setDisplayName("displayName1714148973") .setNodeCount(1539922066) .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) .putAllLabels(new HashMap()) .addAllEndpointUris(new ArrayList()) @@ -1014,6 +1019,7 @@ public void 
createInstanceTest() throws Exception { .setDisplayName("displayName1714148973") .setNodeCount(1539922066) .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) .putAllLabels(new HashMap()) .addAllEndpointUris(new ArrayList()) @@ -1076,6 +1082,7 @@ public void createInstanceTest2() throws Exception { .setDisplayName("displayName1714148973") .setNodeCount(1539922066) .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) .putAllLabels(new HashMap()) .addAllEndpointUris(new ArrayList()) @@ -1138,6 +1145,7 @@ public void updateInstanceTest() throws Exception { .setDisplayName("displayName1714148973") .setNodeCount(1539922066) .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) .putAllLabels(new HashMap()) .addAllEndpointUris(new ArrayList()) @@ -1159,6 +1167,7 @@ public void updateInstanceTest() throws Exception { .setDisplayName("displayName1714148973") .setNodeCount(1539922066) .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) .putAllLabels(new HashMap()) .addAllEndpointUris(new ArrayList()) @@ -1200,6 +1209,7 @@ public void updateInstanceExceptionTest() throws Exception { .setDisplayName("displayName1714148973") .setNodeCount(1539922066) .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) .putAllLabels(new HashMap()) .addAllEndpointUris(new ArrayList()) @@ -2079,4 +2089,58 @@ public void listInstancePartitionOperationsExceptionTest2() throws Exception { // Expected exception. 
} } + + @Test + public void moveInstanceTest() throws Exception { + MoveInstanceResponse expectedResponse = MoveInstanceResponse.newBuilder().build(); + Operation resultOperation = + Operation.newBuilder() + .setName("moveInstanceTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockService.addResponse(resultOperation); + + MoveInstanceRequest request = + MoveInstanceRequest.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setTargetConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .build(); + + MoveInstanceResponse actualResponse = client.moveInstanceAsync(request).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockService.getRequestPaths(); + Assert.assertEquals(1, actualRequests.size()); + + String apiClientHeaderKey = + mockService + .getRequestHeaders() + .get(ApiClientHeaderProvider.getDefaultApiClientHeaderKey()) + .iterator() + .next(); + Assert.assertTrue( + GaxHttpJsonProperties.getDefaultApiClientHeaderPattern() + .matcher(apiClientHeaderKey) + .matches()); + } + + @Test + public void moveInstanceExceptionTest() throws Exception { + ApiException exception = + ApiExceptionFactory.createException( + new Exception(), FakeStatusCode.of(StatusCode.Code.INVALID_ARGUMENT), false); + mockService.addException(exception); + + try { + MoveInstanceRequest request = + MoveInstanceRequest.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setTargetConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .build(); + client.moveInstanceAsync(request).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + } + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClientTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClientTest.java index 
fbd9ca6c40d..73c6de9b2ca 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClientTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/InstanceAdminClientTest.java @@ -72,7 +72,10 @@ import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; import com.google.spanner.admin.instance.v1.ListInstancesRequest; import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; +import com.google.spanner.admin.instance.v1.MoveInstanceResponse; import com.google.spanner.admin.instance.v1.ProjectName; +import com.google.spanner.admin.instance.v1.ReplicaComputeCapacity; import com.google.spanner.admin.instance.v1.ReplicaInfo; import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; import com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest; @@ -843,6 +846,7 @@ public void getInstanceTest() throws Exception { .setDisplayName("displayName1714148973") .setNodeCount(1539922066) .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) .putAllLabels(new HashMap()) .addAllEndpointUris(new ArrayList()) @@ -890,6 +894,7 @@ public void getInstanceTest2() throws Exception { .setDisplayName("displayName1714148973") .setNodeCount(1539922066) .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) .putAllLabels(new HashMap()) .addAllEndpointUris(new ArrayList()) @@ -937,6 +942,7 @@ public void createInstanceTest() throws Exception { .setDisplayName("displayName1714148973") .setNodeCount(1539922066) .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) .putAllLabels(new HashMap()) .addAllEndpointUris(new ArrayList()) @@ -998,6 
+1004,7 @@ public void createInstanceTest2() throws Exception { .setDisplayName("displayName1714148973") .setNodeCount(1539922066) .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) .putAllLabels(new HashMap()) .addAllEndpointUris(new ArrayList()) @@ -1059,6 +1066,7 @@ public void updateInstanceTest() throws Exception { .setDisplayName("displayName1714148973") .setNodeCount(1539922066) .setProcessingUnits(-329117885) + .addAllReplicaComputeCapacity(new ArrayList()) .setAutoscalingConfig(AutoscalingConfig.newBuilder().build()) .putAllLabels(new HashMap()) .addAllEndpointUris(new ArrayList()) @@ -1871,4 +1879,56 @@ public void listInstancePartitionOperationsExceptionTest2() throws Exception { // Expected exception. } } + + @Test + public void moveInstanceTest() throws Exception { + MoveInstanceResponse expectedResponse = MoveInstanceResponse.newBuilder().build(); + Operation resultOperation = + Operation.newBuilder() + .setName("moveInstanceTest") + .setDone(true) + .setResponse(Any.pack(expectedResponse)) + .build(); + mockInstanceAdmin.addResponse(resultOperation); + + MoveInstanceRequest request = + MoveInstanceRequest.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setTargetConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .build(); + + MoveInstanceResponse actualResponse = client.moveInstanceAsync(request).get(); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockInstanceAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + MoveInstanceRequest actualRequest = ((MoveInstanceRequest) actualRequests.get(0)); + + Assert.assertEquals(request.getName(), actualRequest.getName()); + Assert.assertEquals(request.getTargetConfig(), actualRequest.getTargetConfig()); + Assert.assertTrue( + channelProvider.isHeaderSent( + 
ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void moveInstanceExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockInstanceAdmin.addException(exception); + + try { + MoveInstanceRequest request = + MoveInstanceRequest.newBuilder() + .setName(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + .setTargetConfig(InstanceConfigName.of("[PROJECT]", "[INSTANCE_CONFIG]").toString()) + .build(); + client.moveInstanceAsync(request).get(); + Assert.fail("No exception raised"); + } catch (ExecutionException e) { + Assert.assertEquals(InvalidArgumentException.class, e.getCause().getClass()); + InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/MockInstanceAdminImpl.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/MockInstanceAdminImpl.java index b6f95a3f504..d8920f79399 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/MockInstanceAdminImpl.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/instance/v1/MockInstanceAdminImpl.java @@ -48,6 +48,7 @@ import com.google.spanner.admin.instance.v1.ListInstancePartitionsResponse; import com.google.spanner.admin.instance.v1.ListInstancesRequest; import com.google.spanner.admin.instance.v1.ListInstancesResponse; +import com.google.spanner.admin.instance.v1.MoveInstanceRequest; import com.google.spanner.admin.instance.v1.UpdateInstanceConfigRequest; import com.google.spanner.admin.instance.v1.UpdateInstancePartitionRequest; import com.google.spanner.admin.instance.v1.UpdateInstanceRequest; @@ -511,4 +512,25 @@ public void 
listInstancePartitionOperations( Exception.class.getName()))); } } + + @Override + public void moveInstance( + MoveInstanceRequest request, StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof Operation) { + requests.add(request); + responseObserver.onNext(((Operation) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method MoveInstance, expected %s or %s", + response == null ? "null" : response.getClass().getName(), + Operation.class.getName(), + Exception.class.getName()))); + } + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractMockServerTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractMockServerTest.java index 1a5cfdf73e4..3e08eafed07 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractMockServerTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AbstractMockServerTest.java @@ -43,7 +43,12 @@ import com.google.spanner.v1.StructType.Field; import com.google.spanner.v1.Type; import com.google.spanner.v1.TypeCode; +import io.grpc.Metadata; import io.grpc.Server; +import io.grpc.ServerCall; +import io.grpc.ServerCall.Listener; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; import io.grpc.internal.LogExceptionRunnable; import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; import io.grpc.stub.StreamObserver; @@ -151,6 +156,10 @@ public abstract class AbstractMockServerTest { @BeforeClass public static void startStaticServer() throws IOException { + startStaticServer(createServerInterceptor()); + } + + public static void startStaticServer(ServerInterceptor interceptor) throws IOException { mockSpanner = new 
MockSpannerServiceImpl(); mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. mockInstanceAdmin = new MockInstanceAdminImpl(); @@ -176,6 +185,7 @@ public void getOperation( .addService(mockInstanceAdmin) .addService(mockDatabaseAdmin) .addService(mockOperations) + .intercept(interceptor) .build() .start(); mockSpanner.putStatementResult( @@ -205,6 +215,16 @@ public void getOperation( Logger.getLogger("io.grpc.internal.AbstractClientStream").setUseParentHandlers(false); } + static ServerInterceptor createServerInterceptor() { + return new ServerInterceptor() { + @Override + public Listener interceptCall( + ServerCall call, Metadata headers, ServerCallHandler next) { + return next.startCall(call, headers); + } + }; + } + @AfterClass public static void stopServer() { try { diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AnalyzeStatementsTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AnalyzeStatementsTest.java index 3adb50b170f..6f76963fef7 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AnalyzeStatementsTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/AnalyzeStatementsTest.java @@ -355,8 +355,9 @@ public void testAnalyzeUpdateStatementDdlBatch() { } @Test - public void testAnalyzeUpdateDmlBatch() { + public void testAnalyzeUpdateDmlBatch_AutoCommit() { try (Connection connection = createConnection()) { + connection.setAutocommit(true); connection.startBatchDml(); SpannerException exception = @@ -371,8 +372,23 @@ public void testAnalyzeUpdateDmlBatch() { } @Test - public void testAnalyzeUpdateStatementDmlBatch() { + public void testAnalyzeUpdateDmlBatch_Transactional() { try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.startBatchDml(); + + assertNotNull(connection.analyzeUpdate(PLAN_UPDATE, QueryAnalyzeMode.PLAN)); + 
assertEquals(-1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.runBatch(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + } + + @Test + public void testAnalyzeUpdateStatementDmlBatch_AutoCommit() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); connection.startBatchDml(); SpannerException exception = @@ -385,4 +401,18 @@ public void testAnalyzeUpdateStatementDmlBatch() { assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); } + + @Test + public void testAnalyzeUpdateStatementDmlBatch_Transactional() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.startBatchDml(); + + connection.analyzeUpdateStatement(PLAN_UPDATE, QueryAnalyzeMode.PLAN); + assertEquals(-1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.runBatch(); + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ClientSideStatementsTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ClientSideStatementsTest.java index 51503fd456a..fa208e799f9 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ClientSideStatementsTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ClientSideStatementsTest.java @@ -18,17 +18,22 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import com.google.cloud.spanner.Dialect; import com.google.cloud.spanner.ErrorCode; import com.google.cloud.spanner.SpannerExceptionFactory; import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; 
import com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.time.temporal.ChronoUnit; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -101,6 +106,96 @@ public void testClientSideStatementType() { } } + private static class DurationTestData { + final String sql; + final Duration expected; + + DurationTestData(String sql, Duration expected) { + this.sql = sql; + this.expected = expected; + } + } + + @Test + public void testSetStatementTimeout() { + AbstractStatementParser parser = AbstractStatementParser.getInstance(dialect); + + String resetValue = dialect == Dialect.POSTGRESQL ? "default" : "null"; + for (DurationTestData data : + new DurationTestData[] { + new DurationTestData("set statement_timeout=10", Duration.ofMillis(10)), + new DurationTestData("set statement_timeout = 10", Duration.ofMillis(10)), + new DurationTestData("set statement_timeout = 10 ", Duration.ofMillis(10)), + new DurationTestData("set statement_timeout='10ms'", Duration.ofMillis(10)), + new DurationTestData("set statement_timeout = '10ms'", Duration.ofMillis(10)), + new DurationTestData("set statement_timeout = '10ms' ", Duration.ofMillis(10)), + new DurationTestData("set statement_timeout='10ns'", Duration.ofNanos(10)), + new DurationTestData("set statement_timeout = '10ns'", Duration.ofNanos(10)), + new DurationTestData("set statement_timeout = '10ns' ", Duration.ofNanos(10)), + new DurationTestData("set statement_timeout='10us'", Duration.of(10, ChronoUnit.MICROS)), + new DurationTestData( + "set statement_timeout = '10us'", Duration.of(10, ChronoUnit.MICROS)), + new DurationTestData( + "set statement_timeout = '10us' ", Duration.of(10, ChronoUnit.MICROS)), + new DurationTestData("set statement_timeout='10s'", 
Duration.ofSeconds(10)), + new DurationTestData("set statement_timeout = '10s'", Duration.ofSeconds(10)), + new DurationTestData("set statement_timeout = '10s' ", Duration.ofSeconds(10)), + new DurationTestData("set statement_timeout=" + resetValue, Duration.ZERO), + new DurationTestData("set statement_timeout = " + resetValue, Duration.ZERO), + new DurationTestData("set statement_timeout = " + resetValue + " ", Duration.ZERO), + }) { + ConnectionStatementExecutor executor = mock(ConnectionStatementExecutor.class); + ParsedStatement statement = parser.parse(Statement.of(data.sql)); + assertEquals( + ClientSideStatementType.SET_STATEMENT_TIMEOUT, statement.getClientSideStatementType()); + statement.getClientSideStatement().execute(executor, statement); + verify(executor).statementSetStatementTimeout(data.expected); + } + } + + @Test + public void testSetMaxCommitDelay() { + AbstractStatementParser parser = AbstractStatementParser.getInstance(dialect); + + String prefix = dialect == Dialect.POSTGRESQL ? "spanner." 
: ""; + for (DurationTestData data : + new DurationTestData[] { + new DurationTestData("set " + prefix + "max_commit_delay=10", Duration.ofMillis(10)), + new DurationTestData("set " + prefix + "max_commit_delay = 10", Duration.ofMillis(10)), + new DurationTestData("set " + prefix + "max_commit_delay = 10 ", Duration.ofMillis(10)), + new DurationTestData("set " + prefix + "max_commit_delay='10ms'", Duration.ofMillis(10)), + new DurationTestData( + "set " + prefix + "max_commit_delay = '10ms'", Duration.ofMillis(10)), + new DurationTestData( + "set " + prefix + "max_commit_delay = '10ms' ", Duration.ofMillis(10)), + new DurationTestData("set " + prefix + "max_commit_delay='10ns'", Duration.ofNanos(10)), + new DurationTestData("set " + prefix + "max_commit_delay = '10ns'", Duration.ofNanos(10)), + new DurationTestData( + "set " + prefix + "max_commit_delay = '10ns' ", Duration.ofNanos(10)), + new DurationTestData( + "set " + prefix + "max_commit_delay='10us'", Duration.of(10, ChronoUnit.MICROS)), + new DurationTestData( + "set " + prefix + "max_commit_delay = '10us'", Duration.of(10, ChronoUnit.MICROS)), + new DurationTestData( + "set " + prefix + "max_commit_delay = '10us' ", Duration.of(10, ChronoUnit.MICROS)), + new DurationTestData("set " + prefix + "max_commit_delay='10s'", Duration.ofSeconds(10)), + new DurationTestData( + "set " + prefix + "max_commit_delay = '10s'", Duration.ofSeconds(10)), + new DurationTestData( + "set " + prefix + "max_commit_delay = '10s' ", Duration.ofSeconds(10)), + new DurationTestData("set " + prefix + "max_commit_delay=null", Duration.ZERO), + new DurationTestData("set " + prefix + "max_commit_delay = null", Duration.ZERO), + new DurationTestData("set " + prefix + "max_commit_delay = null ", Duration.ZERO), + }) { + ConnectionStatementExecutor executor = mock(ConnectionStatementExecutor.class); + ParsedStatement statement = parser.parse(Statement.of(data.sql)); + assertEquals( + ClientSideStatementType.SET_MAX_COMMIT_DELAY, 
statement.getClientSideStatementType()); + statement.getClientSideStatement().execute(executor, statement); + verify(executor).statementSetMaxCommitDelay(data.expected); + } + } + private static PrintWriter writer; /** Generates the test script file */ diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplTest.java index 7b57e3f9014..f118a77edbc 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionImplTest.java @@ -25,6 +25,7 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; @@ -32,6 +33,7 @@ import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.isNull; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.anyString; @@ -69,16 +71,20 @@ import com.google.cloud.spanner.TransactionRunner; import com.google.cloud.spanner.Type; import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; import com.google.cloud.spanner.connection.ConnectionImpl.UnitOfWorkType; import com.google.cloud.spanner.connection.ConnectionStatementExecutorImpl.StatementTimeoutGetter; import com.google.cloud.spanner.connection.ReadOnlyStalenessUtil.GetExactStaleness; import com.google.cloud.spanner.connection.StatementResult.ResultType; import 
com.google.cloud.spanner.connection.UnitOfWork.CallType; import com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; +import com.google.common.io.ByteStreams; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; import com.google.spanner.v1.ResultSetStats; +import java.io.InputStream; import java.util.Arrays; import java.util.HashMap; import java.util.List; @@ -204,6 +210,7 @@ private static ResultSet createSelect1MockResultSet() { when(mockResultSet.next()).thenReturn(true, false); when(mockResultSet.getLong(0)).thenReturn(1L); when(mockResultSet.getLong("TEST")).thenReturn(1L); + when(mockResultSet.getType()).thenReturn(Type.struct()); when(mockResultSet.getColumnType(0)).thenReturn(Type.int64()); when(mockResultSet.getColumnType("TEST")).thenReturn(Type.int64()); return mockResultSet; @@ -219,8 +226,8 @@ private static DdlClient createDefaultMockDdlClient() { UpdateDatabaseDdlMetadata metadata = UpdateDatabaseDdlMetadata.getDefaultInstance(); ApiFuture futureMetadata = ApiFutures.immediateFuture(metadata); when(operation.getMetadata()).thenReturn(futureMetadata); - when(ddlClient.executeDdl(anyString())).thenCallRealMethod(); - when(ddlClient.executeDdl(anyList())).thenReturn(operation); + when(ddlClient.executeDdl(anyString(), isNull())).thenCallRealMethod(); + when(ddlClient.executeDdl(anyList(), isNull())).thenReturn(operation); return ddlClient; } catch (Exception e) { throw new RuntimeException(e); @@ -1519,6 +1526,7 @@ public void testAddRemoveTransactionRetryListener() { @Test public void testMergeQueryOptions() { ConnectionOptions connectionOptions = mock(ConnectionOptions.class); + when(connectionOptions.getInitialConnectionPropertyValues()).thenReturn(ImmutableMap.of()); SpannerPool spannerPool = mock(SpannerPool.class); DdlClient ddlClient = mock(DdlClient.class); 
DatabaseClient dbClient = mock(DatabaseClient.class); @@ -1628,6 +1636,7 @@ UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork(boolean isInternalMetadataQu public void testStatementTagAlwaysAllowed() { ConnectionOptions connectionOptions = mock(ConnectionOptions.class); when(connectionOptions.isAutocommit()).thenReturn(true); + when(connectionOptions.getInitialConnectionPropertyValues()).thenReturn(ImmutableMap.of()); SpannerPool spannerPool = mock(SpannerPool.class); DdlClient ddlClient = mock(DdlClient.class); DatabaseClient dbClient = mock(DatabaseClient.class); @@ -1672,6 +1681,7 @@ UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork(boolean isInternalMetadataQu public void testTransactionTagAllowedInTransaction() { ConnectionOptions connectionOptions = mock(ConnectionOptions.class); when(connectionOptions.isAutocommit()).thenReturn(false); + when(connectionOptions.getInitialConnectionPropertyValues()).thenReturn(ImmutableMap.of()); SpannerPool spannerPool = mock(SpannerPool.class); DdlClient ddlClient = mock(DdlClient.class); DatabaseClient dbClient = mock(DatabaseClient.class); @@ -1714,6 +1724,7 @@ connectionOptions, spannerPool, ddlClient, dbClient, mock(BatchClient.class))) { public void testTransactionTagNotAllowedWithoutTransaction() { ConnectionOptions connectionOptions = mock(ConnectionOptions.class); when(connectionOptions.isAutocommit()).thenReturn(true); + when(connectionOptions.getInitialConnectionPropertyValues()).thenReturn(ImmutableMap.of()); SpannerPool spannerPool = mock(SpannerPool.class); DdlClient ddlClient = mock(DdlClient.class); DatabaseClient dbClient = mock(DatabaseClient.class); @@ -1736,6 +1747,7 @@ connectionOptions, spannerPool, ddlClient, dbClient, mock(BatchClient.class))) { public void testTransactionTagNotAllowedAfterTransactionStarted() { ConnectionOptions connectionOptions = mock(ConnectionOptions.class); when(connectionOptions.isAutocommit()).thenReturn(false); + 
when(connectionOptions.getInitialConnectionPropertyValues()).thenReturn(ImmutableMap.of()); SpannerPool spannerPool = mock(SpannerPool.class); DdlClient ddlClient = mock(DdlClient.class); DatabaseClient dbClient = mock(DatabaseClient.class); @@ -1746,12 +1758,15 @@ public void testTransactionTagNotAllowedAfterTransactionStarted() { when(unitOfWork.executeQueryAsync( any(), any(ParsedStatement.class), any(AnalyzeMode.class), Mockito.any())) .thenReturn(ApiFutures.immediateFuture(mock(ResultSet.class))); - when(unitOfWork.rollbackAsync(any())).thenReturn(ApiFutures.immediateFuture(null)); + when(unitOfWork.rollbackAsync(any(), any())).thenReturn(ApiFutures.immediateFuture(null)); try (ConnectionImpl connection = new ConnectionImpl( connectionOptions, spannerPool, ddlClient, dbClient, mock(BatchClient.class)) { @Override - UnitOfWork createNewUnitOfWork(boolean isInternalMetadataQuery, boolean forceSingleUse) { + UnitOfWork createNewUnitOfWork( + boolean isInternalMetadataQuery, + boolean forceSingleUse, + StatementType statementType) { return unitOfWork; } }) { @@ -1873,37 +1888,37 @@ public void testSetRetryAbortsInternally() { .build())) { assertFalse("Read-only should be disabled by default", connection.isReadOnly()); assertTrue("Autocommit should be enabled by default", connection.isAutocommit()); - assertFalse( - "Retry aborts internally should be disabled by default on test connections", + assertTrue( + "Retry aborts internally should be enabled by default on test connections", connection.isRetryAbortsInternally()); // It should be possible to change this value also when in auto-commit mode. - connection.setRetryAbortsInternally(true); - assertTrue(connection.isRetryAbortsInternally()); + connection.setRetryAbortsInternally(false); + assertFalse(connection.isRetryAbortsInternally()); // It should be possible to change this value also when in transactional mode, as long as // there is no active transaction. 
connection.setAutocommit(false); - connection.setRetryAbortsInternally(false); - assertFalse(connection.isRetryAbortsInternally()); + connection.setRetryAbortsInternally(true); + assertTrue(connection.isRetryAbortsInternally()); // It should be possible to change the value when in read-only mode. connection.setReadOnly(true); - connection.setRetryAbortsInternally(true); - assertTrue(connection.isRetryAbortsInternally()); + connection.setRetryAbortsInternally(false); + assertFalse(connection.isRetryAbortsInternally()); // It should not be possible to change the value when there is an active transaction. connection.setReadOnly(false); connection.setAutocommit(false); connection.execute(Statement.of(SELECT)); - assertThrows(SpannerException.class, () -> connection.setRetryAbortsInternally(false)); + assertThrows(SpannerException.class, () -> connection.setRetryAbortsInternally(true)); // Verify that the value did not change. - assertTrue(connection.isRetryAbortsInternally()); + assertFalse(connection.isRetryAbortsInternally()); // Rolling back the connection should allow us to set the property again. 
connection.rollback(); - connection.setRetryAbortsInternally(false); - assertFalse(connection.isRetryAbortsInternally()); + connection.setRetryAbortsInternally(true); + assertTrue(connection.isRetryAbortsInternally()); } } @@ -1921,4 +1936,102 @@ private void assertThrowResultNotAllowed( .contains( "Only statements that return a result of one of the following types are allowed")); } + + @Test + public void testProtoDescriptorsAlwaysAllowed() { + ConnectionOptions connectionOptions = mock(ConnectionOptions.class); + when(connectionOptions.isAutocommit()).thenReturn(true); + when(connectionOptions.getInitialConnectionPropertyValues()).thenReturn(ImmutableMap.of()); + SpannerPool spannerPool = mock(SpannerPool.class); + DdlClient ddlClient = mock(DdlClient.class); + DatabaseClient dbClient = mock(DatabaseClient.class); + when(dbClient.getDialect()).thenReturn(Dialect.GOOGLE_STANDARD_SQL); + final UnitOfWork unitOfWork = mock(UnitOfWork.class); + final String protoDescriptorsFilePath = + "src/test/resources/com/google/cloud/spanner/descriptors.pb"; + when(unitOfWork.executeDdlAsync(any(), any(ParsedStatement.class))) + .thenReturn(ApiFutures.immediateFuture(null)); + when(unitOfWork.executeQueryAsync( + any(), any(ParsedStatement.class), any(AnalyzeMode.class), Mockito.any())) + .thenReturn(ApiFutures.immediateFuture(mock(ResultSet.class))); + try (ConnectionImpl connection = + new ConnectionImpl( + connectionOptions, spannerPool, ddlClient, dbClient, mock(BatchClient.class)) { + @Override + UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork( + StatementType statementType, boolean isInternalMetadataQuery) { + return unitOfWork; + } + }) { + byte[] protoDescriptors; + try { + InputStream in = + ConnectionImplTest.class + .getClassLoader() + .getResourceAsStream("com/google/cloud/spanner/descriptors.pb"); + assertNotNull(in); + protoDescriptors = ByteStreams.toByteArray(in); + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + + 
assertTrue(connection.isAutocommit()); + + assertNull(connection.getProtoDescriptors()); + connection.setProtoDescriptors(protoDescriptors); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + + connection.setAutocommit(false); + + connection.setProtoDescriptors(protoDescriptors); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + + // proto descriptor should reset after executing a DDL statement + connection.setProtoDescriptors(protoDescriptors); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + connection.execute(Statement.of("CREATE PROTO BUNDLE (examples.spanner.music.SingerInfo)")); + assertNull(connection.getProtoDescriptors()); + + // proto descriptor should not reset if the statement is not a DDL statement + connection.setProtoDescriptors(protoDescriptors); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + connection.execute(Statement.of("SELECT FOO FROM BAR")); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + + // proto descriptor file path should reset after executing a DDL statement + connection.setProtoDescriptorsFilePath(protoDescriptorsFilePath); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + connection.execute(Statement.of("CREATE PROTO BUNDLE (examples.spanner.music.SingerInfo)")); + assertNull(connection.getProtoDescriptors()); + assertNull(connection.getProtoDescriptorsFilePath()); + + // proto descriptor file path should not reset if the statement is not a DDL statement + connection.setProtoDescriptorsFilePath(protoDescriptorsFilePath); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + connection.execute(Statement.of("SELECT FOO FROM BAR")); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + assertEquals(protoDescriptorsFilePath, connection.getProtoDescriptorsFilePath()); + + // test proto descriptor file path as input + 
connection.setProtoDescriptorsFilePath(protoDescriptorsFilePath); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + connection.execute(Statement.of("CREATE PROTO BUNDLE (examples.spanner.music.SingerInfo)")); + assertNull(connection.getProtoDescriptors()); + + // proto descriptor set through file path should overwrite the proto descriptor set from + // byte[] + connection.setProtoDescriptors("protoDescriptors".getBytes()); + connection.setProtoDescriptorsFilePath(protoDescriptorsFilePath); + assertArrayEquals(protoDescriptors, connection.getProtoDescriptors()); + connection.execute(Statement.of("CREATE PROTO BUNDLE (examples.spanner.music.SingerInfo)")); + assertNull(connection.getProtoDescriptors()); + + // proto descriptor set through byte[] should overwrite the proto descriptor from file path + connection.setProtoDescriptorsFilePath(protoDescriptorsFilePath); + connection.setProtoDescriptors("protoDescriptors".getBytes()); + assertArrayEquals("protoDescriptors".getBytes(), connection.getProtoDescriptors()); + connection.execute(Statement.of("CREATE PROTO BUNDLE (examples.spanner.music.SingerInfo)")); + assertNull(connection.getProtoDescriptors()); + } + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionOptionsTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionOptionsTest.java index 53b9f3d826f..94a44579acf 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionOptionsTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionOptionsTest.java @@ -1165,4 +1165,29 @@ public void testMaxCommitDelay() { .build() .getMaxCommitDelay()); } + + @Test + public void testEnableApiTracing() { + assertNull( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database") + .setCredentials(NoCredentials.getInstance()) 
+ .build() + .isEnableApiTracing()); + assertTrue( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?enableApiTracing=true") + .setCredentials(NoCredentials.getInstance()) + .build() + .isEnableApiTracing()); + assertFalse( + ConnectionOptions.newBuilder() + .setUri( + "cloudspanner:/projects/test-project-123/instances/test-instance/databases/test-database?enableApiTracing=false") + .setCredentials(NoCredentials.getInstance()) + .build() + .isEnableApiTracing()); + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionPropertyTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionPropertyTest.java new file mode 100644 index 00000000000..0888f61cf90 --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionPropertyTest.java @@ -0,0 +1,301 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionProperty.create; +import static com.google.cloud.spanner.connection.ConnectionProperty.createKey; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.NonNegativeIntegerConverter; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.StringValueConverter; +import com.google.cloud.spanner.connection.ConnectionProperty.Context; +import java.util.Objects; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ConnectionPropertyTest { + + @Test + public void testCreateKey() { + assertEquals("my_property", createKey(/* extension = */ null, "my_property")); + assertEquals("my_property", createKey(/* extension = */ null, "My_Property")); + assertEquals("my_property", createKey(/* extension = */ null, "MY_PROPERTY")); + assertEquals("my_extension.my_property", createKey("my_extension", "my_property")); + assertEquals("my_extension.my_property", createKey("My_Extension", "My_Property")); + assertEquals("my_extension.my_property", createKey("MY_EXTENSION", "MY_PROPERTY")); + + //noinspection DataFlowIssue + assertThrows(SpannerException.class, () -> createKey("my_extension", /* name = */ null)); + assertThrows(SpannerException.class, () -> createKey("my_extension", "")); + } + + @Test + public void testCreate() { + ConnectionProperty property = + create( + "my_property", + "Description of my_property", + "default_value", + StringValueConverter.INSTANCE, + Context.USER); + assertEquals("my_property", property.getName()); + assertEquals("Description of my_property", property.getDescription()); + 
assertEquals("default_value", property.getDefaultValue()); + assertEquals("my_value", Objects.requireNonNull(property.convert("my_value")).getValue()); + assertEquals(property.getContext(), Context.USER); + assertEquals("my_property", property.getKey()); + + ConnectionProperty startupProperty = + create( + "STARTUP_PROPERTY", + "Description of STARTUP_PROPERTY", + 1, + NonNegativeIntegerConverter.INSTANCE, + Context.STARTUP); + // The name is folded to lower-case. + assertEquals("startup_property", startupProperty.getName()); + assertEquals("Description of STARTUP_PROPERTY", startupProperty.getDescription()); + assertEquals(Integer.valueOf(1), startupProperty.getDefaultValue()); + assertEquals( + Integer.valueOf(2), Objects.requireNonNull(startupProperty.convert("2")).getValue()); + assertEquals(startupProperty.getContext(), Context.STARTUP); + assertEquals("startup_property", startupProperty.getKey()); + } + + @Test + public void testEquals() { + ConnectionProperty property1 = + new ConnectionProperty<>( + /* extension = */ null, + "my_property", + "Description of property1", + "default_value_1", + null, + StringValueConverter.INSTANCE, + Context.STARTUP); + ConnectionProperty property2 = + new ConnectionProperty<>( + /* extension = */ null, + "my_property", + "Description of property2", + "default_value_2", + null, + StringValueConverter.INSTANCE, + Context.USER); + ConnectionProperty property3 = + new ConnectionProperty<>( + "my_extension", + "my_property", + "Description of property3", + "default_value_3", + null, + StringValueConverter.INSTANCE, + Context.STARTUP); + ConnectionProperty property4 = + new ConnectionProperty<>( + "my_extension", + "my_property", + "Description of property4", + "default_value_4", + null, + StringValueConverter.INSTANCE, + Context.USER); + ConnectionProperty property5 = + new ConnectionProperty<>( + /* extension = */ null, + "my_other_property", + "Description of property5", + "default_value_5", + null, + 
StringValueConverter.INSTANCE, + Context.STARTUP); + ConnectionProperty property6 = + new ConnectionProperty<>( + "my_extension", + "my_other_property", + "Description of property6", + "default_value_6", + null, + StringValueConverter.INSTANCE, + Context.STARTUP); + ConnectionProperty property7 = + new ConnectionProperty<>( + /* extension = */ null, + "MY_PROPERTY", + "Description of property7", + "default_value_7", + null, + StringValueConverter.INSTANCE, + Context.STARTUP); + ConnectionProperty property8 = + new ConnectionProperty<>( + "MY_EXTENSION", + "my_property", + "Description of property8", + "default_value_8", + null, + StringValueConverter.INSTANCE, + Context.STARTUP); + ConnectionProperty property9 = + new ConnectionProperty<>( + "my_extension", + "MY_PROPERTY", + "Description of property9", + "default_value_9", + null, + StringValueConverter.INSTANCE, + Context.STARTUP); + + // Equality is based only on the key. + // The key is the lower case combination of extension and name. + // If extension is null, then only the name is the key. 
+ + // property1 = my_property + assertEquals(property1, property2); + assertNotEquals(property1, property3); + assertNotEquals(property1, property4); + assertNotEquals(property1, property5); + assertNotEquals(property1, property6); + assertEquals(property1, property7); + assertNotEquals(property1, property8); + assertNotEquals(property1, property9); + + // property2 = my_property + assertEquals(property2, property1); + assertNotEquals(property2, property3); + assertNotEquals(property2, property4); + assertNotEquals(property2, property5); + assertNotEquals(property2, property6); + assertEquals(property2, property7); + assertNotEquals(property2, property8); + assertNotEquals(property2, property9); + + // property3 = my_extension.my_property + assertNotEquals(property3, property1); + assertNotEquals(property3, property2); + assertEquals(property3, property4); + assertNotEquals(property3, property5); + assertNotEquals(property3, property6); + assertNotEquals(property3, property7); + assertEquals(property3, property8); + assertEquals(property3, property9); + + // property4 = my_extension.my_property + assertNotEquals(property4, property1); + assertNotEquals(property4, property2); + assertEquals(property4, property3); + assertNotEquals(property4, property5); + assertNotEquals(property4, property6); + assertNotEquals(property4, property7); + assertEquals(property4, property8); + assertEquals(property4, property9); + + // property5 = my_other_property + assertNotEquals(property5, property1); + assertNotEquals(property5, property2); + assertNotEquals(property5, property3); + assertNotEquals(property5, property4); + assertNotEquals(property5, property6); + assertNotEquals(property5, property7); + assertNotEquals(property5, property8); + assertNotEquals(property5, property9); + + // property6 = my_extension.my_other_property + assertNotEquals(property6, property1); + assertNotEquals(property6, property2); + assertNotEquals(property6, property3); + assertNotEquals(property6, 
property4); + assertNotEquals(property6, property5); + assertNotEquals(property6, property7); + assertNotEquals(property6, property8); + assertNotEquals(property6, property9); + + // property7 = MY_PROPERTY (same as property1 and property2) + assertEquals(property7, property1); + assertEquals(property7, property2); + assertNotEquals(property7, property3); + assertNotEquals(property7, property4); + assertNotEquals(property7, property5); + assertNotEquals(property7, property6); + assertNotEquals(property7, property8); + assertNotEquals(property7, property9); + + // property8 = MY_EXTENSION.my_property (same as property4) + assertNotEquals(property8, property1); + assertNotEquals(property8, property2); + assertEquals(property8, property3); + assertEquals(property8, property4); + assertNotEquals(property8, property5); + assertNotEquals(property8, property6); + assertNotEquals(property8, property7); + assertEquals(property8, property9); + + // property9 = my_extension.MY_PROPERTY (same as property4 and property8) + assertNotEquals(property9, property1); + assertNotEquals(property9, property2); + assertEquals(property9, property3); + assertEquals(property9, property4); + assertNotEquals(property9, property5); + assertNotEquals(property9, property6); + assertNotEquals(property9, property7); + assertEquals(property9, property8); + } + + @Test + public void testConvert() { + ConnectionProperty property = + create( + "my_property", + "Description of my_property", + 1, + NonNegativeIntegerConverter.INSTANCE, + Context.STARTUP); + assertEquals(Integer.valueOf(100), Objects.requireNonNull(property.convert("100")).getValue()); + assertThrows(SpannerException.class, () -> property.convert("foo")); + assertThrows(SpannerException.class, () -> property.convert("-100")); + } + + @Test + public void testCreateInitialValue() { + ConnectionProperty property = + create( + "my_property", + "Description of my_property", + "default_value", + StringValueConverter.INSTANCE, + Context.USER); 
+ + ConnectionPropertyValue initialValue = property.createInitialValue(null); + assertEquals(property.getDefaultValue(), initialValue.getValue()); + assertEquals(property.getDefaultValue(), initialValue.getResetValue()); + assertSame(initialValue.getProperty(), property); + + ConnectionPropertyValue startupValue = + new ConnectionPropertyValue<>(property, "other_value", "other_value"); + ConnectionPropertyValue initialValueWithStartupValue = + property.createInitialValue(startupValue); + assertEquals("other_value", initialValueWithStartupValue.getValue()); + assertEquals("other_value", initialValueWithStartupValue.getResetValue()); + assertSame(initialValueWithStartupValue.getProperty(), property); + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionPropertyValueTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionPropertyValueTest.java new file mode 100644 index 00000000000..d4f795185e4 --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionPropertyValueTest.java @@ -0,0 +1,100 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTOCOMMIT_DML_MODE; +import static com.google.cloud.spanner.connection.ConnectionProperties.CONNECTION_STATE_TYPE; +import static com.google.cloud.spanner.connection.ConnectionProperties.READONLY; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.connection.ConnectionProperty.Context; +import com.google.cloud.spanner.connection.ConnectionState.Type; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ConnectionPropertyValueTest { + + @Test + public void testSetValue() { + // This value can be set at any time. + ConnectionPropertyValue value = READONLY.createInitialValue(null); + assertEquals(READONLY.getDefaultValue(), value.getValue()); + + value.setValue(Boolean.FALSE, Context.STARTUP); + assertEquals(Boolean.FALSE, value.getValue()); + + value.setValue(Boolean.TRUE, Context.USER); + assertEquals(Boolean.TRUE, value.getValue()); + + value.setValue(Boolean.FALSE, Context.USER); + assertEquals(Boolean.FALSE, value.getValue()); + + // This value may only be set outside transactions. 
+ ConnectionPropertyValue outsideTransactionOnlyValue = + AUTOCOMMIT_DML_MODE.createInitialValue(null); + assertEquals(AUTOCOMMIT_DML_MODE.getDefaultValue(), outsideTransactionOnlyValue.getValue()); + + outsideTransactionOnlyValue.setValue(AutocommitDmlMode.PARTITIONED_NON_ATOMIC, Context.STARTUP); + assertEquals(AutocommitDmlMode.PARTITIONED_NON_ATOMIC, outsideTransactionOnlyValue.getValue()); + + outsideTransactionOnlyValue.setValue(AutocommitDmlMode.TRANSACTIONAL, Context.USER); + assertEquals(AutocommitDmlMode.TRANSACTIONAL, outsideTransactionOnlyValue.getValue()); + + // This value may only be set at startup. + ConnectionPropertyValue startupOnlyValue = + CONNECTION_STATE_TYPE.createInitialValue(null); + assertEquals(CONNECTION_STATE_TYPE.getDefaultValue(), startupOnlyValue.getValue()); + + startupOnlyValue.setValue(Type.TRANSACTIONAL, Context.STARTUP); + assertEquals(Type.TRANSACTIONAL, startupOnlyValue.getValue()); + + // This property may not be set after startup.. + assertThrows( + SpannerException.class, + () -> startupOnlyValue.setValue(Type.NON_TRANSACTIONAL, Context.USER)); + // The value should not have changed. + assertEquals(Type.TRANSACTIONAL, startupOnlyValue.getValue()); + + // This property may not be set in a transaction. + assertThrows( + SpannerException.class, + () -> startupOnlyValue.setValue(Type.NON_TRANSACTIONAL, Context.USER)); + // The value should not have changed. 
+ assertEquals(Type.TRANSACTIONAL, startupOnlyValue.getValue()); + } + + @Test + public void testCopy() { + ConnectionPropertyValue value = + new ConnectionPropertyValue<>( + /* property = */ AUTOCOMMIT_DML_MODE, + /* resetValue = */ AutocommitDmlMode.PARTITIONED_NON_ATOMIC, + /* value = */ AutocommitDmlMode.TRANSACTIONAL); + ConnectionPropertyValue copy = value.copy(); + + assertEquals(value, copy); + assertNotSame(value, copy); + assertEquals(value.getProperty(), copy.getProperty()); + assertEquals(value.getValue(), copy.getValue()); + assertEquals(value.getResetValue(), copy.getResetValue()); + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStateMockServerTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStateMockServerTest.java new file mode 100644 index 00000000000..ea79a7132bf --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStateMockServerTest.java @@ -0,0 +1,231 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionProperties.CONNECTION_STATE_TYPE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.connection.ConnectionState.Type; +import com.google.cloud.spanner.connection.ITAbstractSpannerTest.ITConnection; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class ConnectionStateMockServerTest extends AbstractMockServerTest { + + @Parameters(name = "dialect = {0})") + public static Object[] data() { + return Dialect.values(); + } + + @Parameter public Dialect dialect; + + private Dialect currentDialect; + + @BeforeClass + public static void enableTransactionalConnectionStateForPostgreSQL() { + System.setProperty( + ConnectionOptions.ENABLE_TRANSACTIONAL_CONNECTION_STATE_FOR_POSTGRESQL_PROPERTY, "true"); + } + + @AfterClass + public static void disableTransactionalConnectionStateForPostgreSQL() { + System.clearProperty( + ConnectionOptions.ENABLE_TRANSACTIONAL_CONNECTION_STATE_FOR_POSTGRESQL_PROPERTY); + } + + @Before + public void setupDialect() { + if (currentDialect != dialect) { + // Reset the dialect result. 
+ SpannerPool.closeSpannerPool(); + mockSpanner.putStatementResult(StatementResult.detectDialectResult(dialect)); + currentDialect = dialect; + } + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + protected String getBaseUrl() { + return String.format( + "cloudspanner://localhost:%d/projects/proj/instances/inst/databases/db?usePlainText=true", + getPort()); + } + + ITConnection createConnection(ConnectionState.Type type) { + return createConnection(";" + CONNECTION_STATE_TYPE.getKey() + "=" + type.name()); + } + + @Test + public void testConnectionStateType() { + try (Connection connection = createConnection()) { + // The default ConnectionState.Type should depend on the dialect. + assertEquals( + dialect == Dialect.POSTGRESQL ? Type.TRANSACTIONAL : Type.NON_TRANSACTIONAL, + ((ConnectionImpl) connection).getConnectionStateType()); + } + // It should be possible to override the default ConnectionState.Type, irrespective of the + // database dialect. + try (Connection connection = createConnection(Type.TRANSACTIONAL)) { + assertEquals(Type.TRANSACTIONAL, ((ConnectionImpl) connection).getConnectionStateType()); + } + try (Connection connection = createConnection(Type.NON_TRANSACTIONAL)) { + assertEquals(Type.NON_TRANSACTIONAL, ((ConnectionImpl) connection).getConnectionStateType()); + } + } + + @Test + public void testAutocommitPersistsConnectionState() { + try (Connection connection = createConnection(";autocommit=true")) { + assertTrue(connection.isAutocommit()); + + assertEquals(AutocommitDmlMode.TRANSACTIONAL, connection.getAutocommitDmlMode()); + connection.setAutocommitDmlMode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + assertEquals(AutocommitDmlMode.PARTITIONED_NON_ATOMIC, connection.getAutocommitDmlMode()); + } + } + + @Test + public void testNonTransactionalState_commitsAutomatically() { + try (Connection connection = + createConnection(";connection_state_type=non_transactional;autocommit=false")) { + 
assertEquals(((ConnectionImpl) connection).getConnectionStateType(), Type.NON_TRANSACTIONAL); + assertFalse(connection.isAutocommit()); + + // Verify the initial default value. + assertFalse(connection.isReturnCommitStats()); + + // Change the value and read it back in the same transaction. + connection.setReturnCommitStats(true); + assertTrue(connection.isReturnCommitStats()); + + // Rolling back should not have any impact on the connection state, as the connection state is + // non-transactional. + connection.rollback(); + assertTrue(connection.isReturnCommitStats()); + + // Verify that the behavior is the same with autocommit=true and a temporary transaction. + assertTrue(connection.isReturnCommitStats()); + connection.setAutocommit(true); + connection.beginTransaction(); + connection.setReturnCommitStats(false); + assertFalse(connection.isReturnCommitStats()); + connection.rollback(); + assertFalse(connection.isReturnCommitStats()); + } + } + + @Test + public void testTransactionalState_rollBacksConnectionState() { + try (Connection connection = + createConnection(";connection_state_type=transactional;autocommit=false")) { + assertEquals(((ConnectionImpl) connection).getConnectionStateType(), Type.TRANSACTIONAL); + assertFalse(connection.isAutocommit()); + + // Verify the initial default value. + assertFalse(connection.isReturnCommitStats()); + + // Change the value and read it back in the same transaction. + connection.setReturnCommitStats(true); + assertTrue(connection.isReturnCommitStats()); + + // Rolling back will undo the connection state change. + connection.rollback(); + assertFalse(connection.isReturnCommitStats()); + + // Verify that the behavior is the same with autocommit=true and a temporary transaction. 
+ assertFalse(connection.isReturnCommitStats()); + connection.setAutocommit(true); + connection.beginTransaction(); + connection.setReturnCommitStats(true); + assertTrue(connection.isReturnCommitStats()); + connection.rollback(); + assertFalse(connection.isReturnCommitStats()); + } + } + + @Test + public void testTransactionalState_commitsConnectionState() { + try (Connection connection = + createConnection(";connection_state_type=transactional;autocommit=false")) { + assertEquals(((ConnectionImpl) connection).getConnectionStateType(), Type.TRANSACTIONAL); + assertFalse(connection.isAutocommit()); + + // Verify the initial default value. + assertFalse(connection.isReturnCommitStats()); + + // Change the value and read it back in the same transaction. + connection.setReturnCommitStats(true); + assertTrue(connection.isReturnCommitStats()); + + // Committing will persist the connection state change. + connection.commit(); + assertTrue(connection.isReturnCommitStats()); + + // Verify that the behavior is the same with autocommit=true and a temporary transaction. + assertTrue(connection.isReturnCommitStats()); + connection.setAutocommit(true); + connection.beginTransaction(); + connection.setReturnCommitStats(false); + assertFalse(connection.isReturnCommitStats()); + connection.commit(); + assertFalse(connection.isReturnCommitStats()); + } + } + + @Test + public void testLocalChangeIsLostAfterTransaction() { + // SET LOCAL ... has the same effect regardless of connection state type. + for (ConnectionState.Type type : Type.values()) { + try (ConnectionImpl connection = (ConnectionImpl) createConnection()) { + assertTrue(connection.isAutocommit()); + + for (boolean commit : new boolean[] {true, false}) { + // Verify the initial default value. + assertFalse(connection.isReturnCommitStats()); + + connection.beginTransaction(); + // Change the value and read it back in the same transaction. 
+ connection.setReturnCommitStats(true, /* local = */ true); + assertTrue(connection.isReturnCommitStats()); + // Both rolling back and committing will undo the connection state change. + if (commit) { + connection.commit(); + } else { + connection.rollback(); + } + // The local change should now be undone. + assertFalse(connection.isReturnCommitStats()); + } + } + } + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStateTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStateTest.java new file mode 100644 index 00000000000..7d613a3eef9 --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStateTest.java @@ -0,0 +1,267 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.ConnectionProperties.AUTOCOMMIT_DML_MODE; +import static com.google.cloud.spanner.connection.ConnectionProperties.CONNECTION_STATE_TYPE; +import static com.google.cloud.spanner.connection.ConnectionProperties.READONLY; +import static com.google.cloud.spanner.connection.ConnectionProperties.RETRY_ABORTS_INTERNALLY; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.NoCredentials; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.connection.ConnectionProperty.Context; +import com.google.cloud.spanner.connection.ConnectionState.Type; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class ConnectionStateTest { + + @Parameters(name = "connectionStateType = {0}") + public static Object[] data() { + return ConnectionState.Type.values(); + } + + @SuppressWarnings("ClassEscapesDefinedScope") + @Parameter + public ConnectionState.Type connectionStateType; + + ConnectionState getNonTransactionalState() { + return new ConnectionState( + createConnectionOptionsBuilder().build().getInitialConnectionPropertyValues()); + } + + ConnectionState getTransactionalState() { + return new ConnectionState( + createConnectionOptionsBuilder() + .setConnectionPropertyValue(CONNECTION_STATE_TYPE, Type.TRANSACTIONAL) + .build() + .getInitialConnectionPropertyValues()); + } + + ConnectionOptions.Builder createConnectionOptionsBuilder() { + return ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d") + .setCredentials(NoCredentials.getInstance()); + } + + ConnectionState 
getConnectionState() { + return connectionStateType == Type.TRANSACTIONAL + ? getTransactionalState() + : getNonTransactionalState(); + } + + @Test + public void testSetOutsideTransaction() { + ConnectionState state = getConnectionState(); + assertEquals(connectionStateType, state.getType()); + + assertEquals(false, state.getValue(READONLY).getValue()); + state.setValue(READONLY, true, Context.USER, /* inTransaction = */ false); + assertEquals(true, state.getValue(READONLY).getValue()); + } + + @Test + public void testSetToNullOutsideTransaction() { + ConnectionState state = getConnectionState(); + assertEquals(AutocommitDmlMode.TRANSACTIONAL, state.getValue(AUTOCOMMIT_DML_MODE).getValue()); + state.setValue(AUTOCOMMIT_DML_MODE, null, Context.USER, /* inTransaction = */ false); + assertNull(state.getValue(AUTOCOMMIT_DML_MODE).getValue()); + } + + @Test + public void testSetInTransactionCommit() { + ConnectionState state = getConnectionState(); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setValue(RETRY_ABORTS_INTERNALLY, false, Context.USER, /* inTransaction = */ true); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + + // Verify that the change is persisted if the transaction is committed. + state.commit(); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } + + @Test + public void testSetInTransactionRollback() { + ConnectionState state = getConnectionState(); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setValue(RETRY_ABORTS_INTERNALLY, false, Context.USER, /* inTransaction = */ true); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + + // Verify that the change is rolled back if the transaction is rolled back and the connection + // state is transactional. + state.rollback(); + // The value should rolled back to true if the state is transactional. 
+ // The value should (still) be false if the state is non-transactional. + boolean expectedValue = connectionStateType == Type.TRANSACTIONAL; + assertEquals(expectedValue, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } + + @Test + public void testResetInTransactionCommit() { + ConnectionState state = getConnectionState(); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setValue(RETRY_ABORTS_INTERNALLY, false, Context.USER, /* inTransaction = */ true); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.commit(); + + // Reset the value to the default (true). + state.resetValue(RETRY_ABORTS_INTERNALLY, Context.USER, /* inTransaction = */ true); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + + // Verify that the change is persisted if the transaction is committed. + state.commit(); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } + + @Test + public void testResetInTransactionRollback() { + ConnectionState state = getConnectionState(); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setValue(RETRY_ABORTS_INTERNALLY, false, Context.USER, /* inTransaction = */ true); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.commit(); + + // Reset the value to the default (true). + state.resetValue(RETRY_ABORTS_INTERNALLY, Context.USER, /* inTransaction = */ true); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + + // Verify that the change is rolled back if the transaction is rolled back and the connection + // state is transactional. + state.rollback(); + // The value should rolled back to false if the state is transactional. + // The value should (still) be true if the state is non-transactional. 
+ boolean expectedValue = connectionStateType != Type.TRANSACTIONAL; + assertEquals(expectedValue, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } + + @Test + public void testSetLocal() { + ConnectionState state = getConnectionState(); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setLocalValue(RETRY_ABORTS_INTERNALLY, false); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + + // Verify that the change is no longer visible once the transaction has ended, even if the + // transaction was committed. + state.commit(); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } + + @Test + public void testSetLocalForStartupProperty() { + ConnectionState state = getConnectionState(); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> state.setLocalValue(CONNECTION_STATE_TYPE, Type.TRANSACTIONAL)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + + @Test + public void testSetInTransactionForStartupProperty() { + ConnectionState state = getConnectionState(); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + state.setValue( + CONNECTION_STATE_TYPE, + Type.TRANSACTIONAL, + Context.USER, + /* inTransaction = */ true)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + + @Test + public void testSetStartupOnlyProperty() { + ConnectionState state = getConnectionState(); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + state.setValue( + CONNECTION_STATE_TYPE, + Type.TRANSACTIONAL, + Context.USER, + /* inTransaction = */ false)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + + @Test + public void testReset() { + ConnectionState state = getConnectionState(); + // The default should be true. 
+ assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setValue(RETRY_ABORTS_INTERNALLY, false, Context.USER, /* inTransaction = */ false); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + + // Resetting the property should reset it to the default value. + state.resetValue(RETRY_ABORTS_INTERNALLY, Context.USER, /* inTransaction = */ false); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } + + @Test + public void testResetInTransaction() { + ConnectionState state = getConnectionState(); + // The default should be true. + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setValue(RETRY_ABORTS_INTERNALLY, false, Context.USER, /* inTransaction = */ true); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.commit(); + + // Resetting the property should reset it to the default value. + state.resetValue(RETRY_ABORTS_INTERNALLY, Context.USER, /* inTransaction = */ true); + assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } + + @Test + public void testResetStartupOnlyProperty() { + ConnectionState state = getConnectionState(); + SpannerException exception = + assertThrows( + SpannerException.class, + () -> + state.resetValue(CONNECTION_STATE_TYPE, Context.USER, /* inTransaction = */ false)); + assertEquals(ErrorCode.FAILED_PRECONDITION, exception.getErrorCode()); + } + + @Test + public void testInitialValueInConnectionUrl() { + ConnectionOptions options = + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d?retryAbortsInternally=false") + .setCredentials(NoCredentials.getInstance()) + .build(); + ConnectionState state = new ConnectionState(options.getInitialConnectionPropertyValues()); + + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + state.setValue(RETRY_ABORTS_INTERNALLY, true, Context.USER, /* inTransaction = */ false); + 
assertEquals(true, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + + // Resetting the property should reset it to the value that was set in the connection URL. + state.resetValue(RETRY_ABORTS_INTERNALLY, Context.USER, /* inTransaction = */ false); + assertEquals(false, state.getValue(RETRY_ABORTS_INTERNALLY).getValue()); + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementExecutorTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementExecutorTest.java index bf7822e3285..3a5aa1e6d82 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementExecutorTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementExecutorTest.java @@ -31,7 +31,7 @@ import com.google.cloud.spanner.TimestampBound; import com.google.cloud.spanner.connection.PgTransactionMode.AccessMode; import com.google.cloud.spanner.connection.PgTransactionMode.IsolationLevel; -import com.google.protobuf.Duration; +import java.time.Duration; import java.util.concurrent.TimeUnit; import org.junit.Before; import org.junit.Test; @@ -128,11 +128,11 @@ public void testStatementGetReadTimestamp() { @Test public void testStatementGetStatementTimeout() { - subject.statementSetStatementTimeout(Duration.newBuilder().setSeconds(1L).build()); + subject.statementSetStatementTimeout(Duration.ofSeconds(1L)); when(connection.hasStatementTimeout()).thenReturn(true); subject.statementShowStatementTimeout(); verify(connection, atLeastOnce()).getStatementTimeout(any(TimeUnit.class)); - subject.statementSetStatementTimeout(Duration.getDefaultInstance()); + subject.statementSetStatementTimeout(Duration.ZERO); when(connection.hasStatementTimeout()).thenReturn(false); } @@ -212,7 +212,7 @@ public void testStatementSetOptimizerStatisticsPackage() { @Test public void testStatementSetStatementTimeout() { - 
subject.statementSetStatementTimeout(Duration.newBuilder().setNanos(100).build()); + subject.statementSetStatementTimeout(Duration.ofNanos(100)); verify(connection).setStatementTimeout(100L, TimeUnit.NANOSECONDS); } @@ -252,4 +252,29 @@ public void testStatementSetPgTransactionModeNoOp() { verify(connection, never()).setTransactionMode(TransactionMode.READ_ONLY_TRANSACTION); verify(connection, never()).setTransactionMode(TransactionMode.READ_WRITE_TRANSACTION); } + + @Test + public void testStatementSetProtoDescriptors() { + subject.statementSetProtoDescriptors("protoDescriptor".getBytes()); + verify(connection).setProtoDescriptors("protoDescriptor".getBytes()); + } + + @Test + public void testStatementSetProtoDescriptorsFilePath() { + String filePath = "com/google/cloud/spanner/descriptors.pb"; + subject.statementSetProtoDescriptorsFilePath(filePath); + verify(connection).setProtoDescriptorsFilePath(filePath); + } + + @Test + public void testStatementGetProtoDescriptors() { + subject.statementShowProtoDescriptors(); + verify(connection).getProtoDescriptors(); + } + + @Test + public void testStatementGetProtoDescriptorsFilePath() { + subject.statementShowProtoDescriptorsFilePath(); + verify(connection).getProtoDescriptorsFilePath(); + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementWithOneParameterTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementWithOneParameterTest.java index f4044316fc7..72a8e64ae4c 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementWithOneParameterTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionStatementWithOneParameterTest.java @@ -29,7 +29,7 @@ import com.google.cloud.spanner.Statement; import com.google.cloud.spanner.TimestampBound; import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; -import 
com.google.protobuf.Duration; +import java.time.Duration; import java.util.concurrent.TimeUnit; import org.junit.Before; import org.junit.Test; diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionTest.java index c52be0e4d09..c5b34982255 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ConnectionTest.java @@ -30,22 +30,31 @@ import com.google.cloud.spanner.ErrorCode; import com.google.cloud.spanner.MockSpannerServiceImpl; import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.Options.RpcPriority; import com.google.cloud.spanner.ResultSet; import com.google.cloud.spanner.SpannerException; import com.google.cloud.spanner.SpannerOptions; import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.TimestampBound; import com.google.common.collect.ImmutableList; import com.google.spanner.v1.BatchCreateSessionsRequest; import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.DirectedReadOptions; +import com.google.spanner.v1.DirectedReadOptions.ExcludeReplicas; +import com.google.spanner.v1.DirectedReadOptions.ReplicaSelection; import com.google.spanner.v1.ExecuteBatchDmlRequest; import com.google.spanner.v1.ExecuteSqlRequest; import com.google.spanner.v1.ExecuteSqlRequest.QueryOptions; import com.google.spanner.v1.RequestOptions; +import java.nio.charset.StandardCharsets; +import java.time.Duration; import java.util.Arrays; import java.util.Collections; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.function.Consumer; +import java.util.function.Supplier; import javax.annotation.Nonnull; import org.junit.After; import 
org.junit.AfterClass; @@ -228,6 +237,150 @@ public void testBatchUpdateAborted() { } } } + + @Test + public void testReset() { + try (ConnectionImpl connection = (ConnectionImpl) createConnection()) { + assertResetBooleanProperty( + connection, + true, + connection::setRetryAbortsInternally, + connection::isRetryAbortsInternally); + assertResetBooleanProperty( + connection, false, connection::setReadOnly, connection::isReadOnly); + assertResetBooleanProperty( + connection, false, connection::setAutocommit, connection::isAutocommit); + assertResetBooleanProperty( + connection, false, connection::setReturnCommitStats, connection::isReturnCommitStats); + assertResetBooleanProperty( + connection, + false, + connection::setDelayTransactionStartUntilFirstWrite, + connection::isDelayTransactionStartUntilFirstWrite); + assertResetBooleanProperty( + connection, + false, + connection::setKeepTransactionAlive, + connection::isKeepTransactionAlive); + assertResetBooleanProperty( + connection, false, connection::setDataBoostEnabled, connection::isDataBoostEnabled); + assertResetBooleanProperty( + connection, false, connection::setAutoPartitionMode, connection::isAutoPartitionMode); + assertResetBooleanProperty( + connection, + false, + connection::setExcludeTxnFromChangeStreams, + connection::isExcludeTxnFromChangeStreams); + + assertResetProperty( + connection, "", "1", connection::setOptimizerVersion, connection::getOptimizerVersion); + assertResetProperty( + connection, + null, + RpcPriority.LOW, + connection::setRPCPriority, + connection::getRPCPriority); + assertResetProperty( + connection, + DdlInTransactionMode.ALLOW_IN_EMPTY_TRANSACTION, + DdlInTransactionMode.AUTO_COMMIT_TRANSACTION, + connection::setDdlInTransactionMode, + connection::getDdlInTransactionMode); + assertResetProperty( + connection, 0, 4, connection::setMaxPartitions, connection::getMaxPartitions); + assertResetProperty( + connection, + 1, + 8, + connection::setMaxPartitionedParallelism, + 
connection::getMaxPartitionedParallelism); + assertResetProperty( + connection, + null, + Duration.ofMillis(20), + connection::setMaxCommitDelay, + connection::getMaxCommitDelay); + assertResetProperty( + connection, + TimestampBound.strong(), + TimestampBound.ofExactStaleness(10L, TimeUnit.SECONDS), + connection::setReadOnlyStaleness, + connection::getReadOnlyStaleness); + assertResetProperty( + connection, null, "tag", connection::setStatementTag, connection::getStatementTag); + assertResetProperty( + connection, null, "tag", connection::setTransactionTag, connection::getTransactionTag); + assertResetProperty( + connection, + null, + DirectedReadOptions.newBuilder() + .setExcludeReplicas( + ExcludeReplicas.newBuilder() + .addReplicaSelections( + ReplicaSelection.newBuilder().setLocation("foo").build()) + .build()) + .build(), + connection::setDirectedRead, + connection::getDirectedRead); + assertResetProperty( + connection, + SavepointSupport.FAIL_AFTER_ROLLBACK, + SavepointSupport.ENABLED, + connection::setSavepointSupport, + connection::getSavepointSupport); + assertResetProperty( + connection, + null, + "descriptor".getBytes(StandardCharsets.UTF_8), + connection::setProtoDescriptors, + connection::getProtoDescriptors); + assertResetProperty( + connection, + null, + "filename", + connection::setProtoDescriptorsFilePath, + connection::getProtoDescriptorsFilePath); + + // Test the AutocommitDmlMode property that is only supported in auto-commit mode. + connection.rollback(); + connection.setAutocommit(true); + assertResetProperty( + connection, + AutocommitDmlMode.TRANSACTIONAL, + AutocommitDmlMode.PARTITIONED_NON_ATOMIC, + connection::setAutocommitDmlMode, + connection::getAutocommitDmlMode); + connection.setAutocommit(false); + + // Statement timeouts use a customer getter/setter, so we need to manually test that. 
+ assertEquals(0L, connection.getStatementTimeout(TimeUnit.MILLISECONDS)); + connection.setStatementTimeout(10L, TimeUnit.SECONDS); + assertEquals(10L, connection.getStatementTimeout(TimeUnit.SECONDS)); + connection.reset(); + assertEquals(0L, connection.getStatementTimeout(TimeUnit.MILLISECONDS)); + } + } + + private void assertResetBooleanProperty( + ConnectionImpl connection, + boolean defaultValue, + Consumer setter, + Supplier getter) { + assertResetProperty(connection, defaultValue, !defaultValue, setter, getter); + } + + private void assertResetProperty( + ConnectionImpl connection, + T defaultValue, + T alternativeValue, + Consumer setter, + Supplier getter) { + assertEquals(defaultValue, getter.get()); + setter.accept(alternativeValue); + assertEquals(alternativeValue, getter.get()); + connection.reset(); + assertEquals(defaultValue, getter.get()); + } } public static class ConnectionMinSessionsTest extends AbstractMockServerTest { diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/CredentialsProviderTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/CredentialsProviderTest.java index 944e4adeb2d..9e2979e1aaf 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/CredentialsProviderTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/CredentialsProviderTest.java @@ -21,9 +21,11 @@ import com.google.api.gax.core.CredentialsProvider; import com.google.auth.Credentials; +import com.google.auth.oauth2.AccessToken; import com.google.auth.oauth2.OAuth2Credentials; import io.grpc.ManagedChannelBuilder; import java.io.ObjectStreamException; +import java.util.Date; import java.util.concurrent.atomic.AtomicInteger; import org.junit.BeforeClass; import org.junit.Test; @@ -50,6 +52,14 @@ private Object readResolve() throws ObjectStreamException { return this; } + @Override + public AccessToken refreshAccessToken() { + return 
AccessToken.newBuilder() + .setTokenValue("foo") + .setExpirationTime(new Date(Long.MAX_VALUE)) + .build(); + } + public boolean equals(Object obj) { if (!(obj instanceof TestCredentials)) { return false; diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlBatchTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlBatchTest.java index 5ef4c5291d1..93ae60891fb 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlBatchTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlBatchTest.java @@ -21,9 +21,12 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertThrows; import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.argThat; @@ -48,9 +51,12 @@ import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; import com.google.cloud.spanner.connection.UnitOfWork.CallType; import com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState; +import com.google.common.io.ByteStreams; import com.google.protobuf.Timestamp; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; import io.grpc.Status; +import io.opentelemetry.api.trace.Span; +import java.io.InputStream; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -107,8 +113,8 @@ private DdlClient createDefaultMockDdlClient( ApiFuture metadataFuture = ApiFutures.immediateFuture(metadataBuilder.build()); when(operation.getMetadata()).thenReturn(metadataFuture); - 
when(ddlClient.executeDdl(anyString())).thenReturn(operation); - when(ddlClient.executeDdl(anyList())).thenReturn(operation); + when(ddlClient.executeDdl(anyString(), any())).thenReturn(operation); + when(ddlClient.executeDdl(anyList(), any())).thenReturn(operation); return ddlClient; } catch (Exception e) { throw new RuntimeException(e); @@ -128,6 +134,7 @@ private DdlBatch createSubject(DdlClient ddlClient, DatabaseClient dbClient) { .setDdlClient(ddlClient) .setDatabaseClient(dbClient) .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) .build(); } @@ -251,7 +258,7 @@ public void testGetStateAndIsActive() { DdlClient client = mock(DdlClient.class); SpannerException exception = mock(SpannerException.class); when(exception.getErrorCode()).thenReturn(ErrorCode.FAILED_PRECONDITION); - doThrow(exception).when(client).executeDdl(anyList()); + doThrow(exception).when(client).executeDdl(anyList(), isNull()); batch = createSubject(client); assertThat(batch.getState(), is(UnitOfWorkState.STARTED)); assertThat(batch.isActive(), is(true)); @@ -297,8 +304,8 @@ public void testRunBatch() { DdlBatch batch = createSubject(client); get(batch.runBatchAsync(CallType.SYNC)); assertThat(batch.getState(), is(UnitOfWorkState.RAN)); - verify(client, never()).executeDdl(anyString()); - verify(client, never()).executeDdl(argThat(isEmptyListOfStrings())); + verify(client, never()).executeDdl(anyString(), isNull()); + verify(client, never()).executeDdl(argThat(isEmptyListOfStrings()), isNull()); ParsedStatement statement = mock(ParsedStatement.class); when(statement.getType()).thenReturn(StatementType.DDL); @@ -309,14 +316,14 @@ public void testRunBatch() { batch = createSubject(client); batch.executeDdlAsync(CallType.SYNC, statement); get(batch.runBatchAsync(CallType.SYNC)); - verify(client).executeDdl(argThat(isListOfStringsWithSize(1))); + verify(client).executeDdl(argThat(isListOfStringsWithSize(1)), isNull()); client = createDefaultMockDdlClient(); batch = 
createSubject(client); batch.executeDdlAsync(CallType.SYNC, statement); batch.executeDdlAsync(CallType.SYNC, statement); get(batch.runBatchAsync(CallType.SYNC)); - verify(client).executeDdl(argThat(isListOfStringsWithSize(2))); + verify(client).executeDdl(argThat(isListOfStringsWithSize(2)), isNull()); assertThat(batch.getState(), is(UnitOfWorkState.RAN)); boolean exception = false; try { @@ -362,7 +369,48 @@ public void testRunBatch() { } assertThat(exception, is(true)); assertThat(batch.getState(), is(UnitOfWorkState.RUN_FAILED)); - verify(client).executeDdl(argThat(isListOfStringsWithSize(2))); + verify(client).executeDdl(argThat(isListOfStringsWithSize(2)), isNull()); + + // verify when protoDescriptors is null + client = createDefaultMockDdlClient(); + batch = + DdlBatch.newBuilder() + .setDdlClient(client) + .setDatabaseClient(mock(DatabaseClient.class)) + .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) + .setProtoDescriptors(null) + .build(); + batch.executeDdlAsync(CallType.SYNC, statement); + batch.executeDdlAsync(CallType.SYNC, statement); + get(batch.runBatchAsync(CallType.SYNC)); + verify(client).executeDdl(argThat(isListOfStringsWithSize(2)), isNull()); + + // verify when protoDescriptors is not null + byte[] protoDescriptors; + try { + InputStream in = + DdlBatchTest.class + .getClassLoader() + .getResourceAsStream("com/google/cloud/spanner/descriptors.pb"); + assertNotNull(in); + protoDescriptors = ByteStreams.toByteArray(in); + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + client = createDefaultMockDdlClient(); + batch = + DdlBatch.newBuilder() + .setDdlClient(client) + .setDatabaseClient(mock(DatabaseClient.class)) + .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) + .setProtoDescriptors(protoDescriptors) + .build(); + batch.executeDdlAsync(CallType.SYNC, statement); + batch.executeDdlAsync(CallType.SYNC, statement); + 
get(batch.runBatchAsync(CallType.SYNC)); + verify(client).executeDdl(argThat(isListOfStringsWithSize(2)), any(byte[].class)); } @Test @@ -381,12 +429,14 @@ public void testUpdateCount() throws InterruptedException, ExecutionException { OperationFuture operationFuture = mock(OperationFuture.class); when(operationFuture.get()).thenReturn(null); when(operationFuture.getMetadata()).thenReturn(metadataFuture); - when(client.executeDdl(argThat(isListOfStringsWithSize(2)))).thenReturn(operationFuture); + when(client.executeDdl(argThat(isListOfStringsWithSize(2)), isNull())) + .thenReturn(operationFuture); DdlBatch batch = DdlBatch.newBuilder() .withStatementExecutor(new StatementExecutor()) .setDdlClient(client) .setDatabaseClient(mock(DatabaseClient.class)) + .setSpan(Span.getInvalid()) .build(); batch.executeDdlAsync( CallType.SYNC, @@ -419,12 +469,14 @@ public void testFailedUpdateCount() throws InterruptedException, ExecutionExcept new ExecutionException( "ddl statement failed", Status.INVALID_ARGUMENT.asRuntimeException())); when(operationFuture.getMetadata()).thenReturn(metadataFuture); - when(client.executeDdl(argThat(isListOfStringsWithSize(2)))).thenReturn(operationFuture); + when(client.executeDdl(argThat(isListOfStringsWithSize(2)), isNull())) + .thenReturn(operationFuture); DdlBatch batch = DdlBatch.newBuilder() .withStatementExecutor(new StatementExecutor()) .setDdlClient(client) .setDatabaseClient(mock(DatabaseClient.class)) + .setSpan(Span.getInvalid()) .build(); batch.executeDdlAsync( CallType.SYNC, @@ -461,12 +513,14 @@ public void testFailedAfterFirstStatement() throws InterruptedException, Executi new ExecutionException( "ddl statement failed", Status.INVALID_ARGUMENT.asRuntimeException())); when(operationFuture.getMetadata()).thenReturn(metadataFuture); - when(client.executeDdl(argThat(isListOfStringsWithSize(2)))).thenReturn(operationFuture); + when(client.executeDdl(argThat(isListOfStringsWithSize(2)), isNull())) + .thenReturn(operationFuture); 
DdlBatch batch = DdlBatch.newBuilder() .withStatementExecutor(new StatementExecutor()) .setDdlClient(client) .setDatabaseClient(mock(DatabaseClient.class)) + .setSpan(Span.getInvalid()) .build(); batch.executeDdlAsync( CallType.SYNC, @@ -492,8 +546,8 @@ public void testAbort() { DdlBatch batch = createSubject(client); batch.abortBatch(); assertThat(batch.getState(), is(UnitOfWorkState.ABORTED)); - verify(client, never()).executeDdl(anyString()); - verify(client, never()).executeDdl(anyList()); + verify(client, never()).executeDdl(anyString(), isNull()); + verify(client, never()).executeDdl(anyList(), isNull()); ParsedStatement statement = mock(ParsedStatement.class); when(statement.getType()).thenReturn(StatementType.DDL); @@ -504,21 +558,21 @@ public void testAbort() { batch = createSubject(client); batch.executeDdlAsync(CallType.SYNC, statement); batch.abortBatch(); - verify(client, never()).executeDdl(anyList()); + verify(client, never()).executeDdl(anyList(), isNull()); client = createDefaultMockDdlClient(); batch = createSubject(client); batch.executeDdlAsync(CallType.SYNC, statement); batch.executeDdlAsync(CallType.SYNC, statement); batch.abortBatch(); - verify(client, never()).executeDdl(anyList()); + verify(client, never()).executeDdl(anyList(), isNull()); client = createDefaultMockDdlClient(); batch = createSubject(client); batch.executeDdlAsync(CallType.SYNC, statement); batch.executeDdlAsync(CallType.SYNC, statement); batch.abortBatch(); - verify(client, never()).executeDdl(anyList()); + verify(client, never()).executeDdl(anyList(), isNull()); boolean exception = false; try { get(batch.runBatchAsync(CallType.SYNC)); @@ -529,7 +583,7 @@ public void testAbort() { exception = true; } assertThat(exception, is(true)); - verify(client, never()).executeDdl(anyList()); + verify(client, never()).executeDdl(anyList(), isNull()); } @Test @@ -556,7 +610,7 @@ public void testCancel() { public void testCommit() { DdlBatch batch = createSubject(); try { - 
batch.commitAsync(CallType.SYNC); + batch.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE); fail("expected FAILED_PRECONDITION"); } catch (SpannerException e) { assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); @@ -567,7 +621,7 @@ public void testCommit() { public void testRollback() { DdlBatch batch = createSubject(); try { - batch.rollbackAsync(CallType.SYNC); + batch.rollbackAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE); fail("expected FAILED_PRECONDITION"); } catch (SpannerException e) { assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlClientTests.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlClientTests.java index d46e4dca592..c61635fce23 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlClientTests.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DdlClientTests.java @@ -17,17 +17,25 @@ package com.google.cloud.spanner.connection; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.isNull; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.spanner.Database; import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.common.io.ByteStreams; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.io.InputStream; import java.util.Arrays; import 
java.util.Collections; import java.util.List; @@ -39,11 +47,13 @@ @RunWith(JUnit4.class) public class DdlClientTests { + private final String projectId = "test-project"; private final String instanceId = "test-instance"; private final String databaseId = "test-database"; private DdlClient createSubject(DatabaseAdminClient client) { return DdlClient.newBuilder() + .setProjectId(projectId) .setInstanceId(instanceId) .setDatabaseName(databaseId) .setDatabaseAdminClient(client) @@ -52,21 +62,48 @@ private DdlClient createSubject(DatabaseAdminClient client) { @Test public void testExecuteDdl() throws InterruptedException, ExecutionException { + byte[] protoDescriptors; + try { + InputStream in = + DdlBatchTest.class + .getClassLoader() + .getResourceAsStream("com/google/cloud/spanner/descriptors.pb"); + assertNotNull(in); + protoDescriptors = ByteStreams.toByteArray(in); + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + DatabaseAdminClient client = mock(DatabaseAdminClient.class); + Database database = mock(Database.class); + Database.Builder databaseBuilder = mock(Database.Builder.class); @SuppressWarnings("unchecked") OperationFuture operation = mock(OperationFuture.class); + when(operation.get()).thenReturn(null); - when(client.updateDatabaseDdl(eq(instanceId), eq(databaseId), anyList(), isNull())) - .thenReturn(operation); + when(client.newDatabaseBuilder((DatabaseId.of(projectId, instanceId, databaseId)))) + .thenReturn(databaseBuilder); + when(databaseBuilder.setProtoDescriptors(protoDescriptors)).thenReturn(databaseBuilder); + when(databaseBuilder.build()).thenReturn(database); + when(client.updateDatabaseDdl(eq(database), anyList(), isNull())).thenReturn(operation); + DdlClient subject = createSubject(client); String ddl = "CREATE TABLE FOO"; - subject.executeDdl(ddl); - verify(client).updateDatabaseDdl(instanceId, databaseId, Collections.singletonList(ddl), null); + subject.executeDdl(ddl, null); + verify(databaseBuilder, 
never()).setProtoDescriptors(any(byte[].class)); + verify(client).updateDatabaseDdl(database, Collections.singletonList(ddl), null); subject = createSubject(client); List ddlList = Arrays.asList("CREATE TABLE FOO", "DROP TABLE FOO"); - subject.executeDdl(ddlList); - verify(client).updateDatabaseDdl(instanceId, databaseId, ddlList, null); + subject.executeDdl(ddlList, null); + verify(databaseBuilder, never()).setProtoDescriptors(any(byte[].class)); + verify(client).updateDatabaseDdl(database, ddlList, null); + + subject = createSubject(client); + ddlList = Arrays.asList("CREATE PROTO BUNDLE", "CREATE TABLE FOO"); + subject.executeDdl(ddlList, protoDescriptors); + verify(databaseBuilder).setProtoDescriptors(protoDescriptors); + verify(client).updateDatabaseDdl(database, ddlList, null); } @Test diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DecodeModeTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DecodeModeTest.java index 6a6125e1dda..b64a05b2ef4 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DecodeModeTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DecodeModeTest.java @@ -27,6 +27,12 @@ import com.google.cloud.spanner.ResultSet; import com.google.cloud.spanner.SpannerException; import com.google.cloud.spanner.Statement; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadLocalRandom; import org.junit.After; import org.junit.Test; import org.junit.runner.RunWith; @@ -41,7 +47,7 @@ public void clearRequests() { } @Test - public void testAllDecodeModes() { + public void testAllDecodeModes() throws Exception { int numRows = 10; RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows); String sql = "select * from random"; @@ -50,57 +56,85 @@ public 
void testAllDecodeModes() { MockSpannerServiceImpl.StatementResult.query(statement, generator.generate())); try (Connection connection = createConnection()) { - for (boolean readonly : new boolean[] {true, false}) { - for (boolean autocommit : new boolean[] {true, false}) { - connection.setReadOnly(readonly); - connection.setAutocommit(autocommit); + for (boolean multiThreaded : new boolean[] {true, false}) { + for (boolean readonly : new boolean[] {true, false}) { + for (boolean autocommit : new boolean[] {true, false}) { + connection.setReadOnly(readonly); + connection.setAutocommit(autocommit); - int receivedRows = 0; - // DecodeMode#DIRECT is not supported in read/write transactions, as the protobuf value is - // used for checksum calculation. - try (ResultSet direct = - connection.executeQuery( - statement, - !readonly && !autocommit - ? Options.decodeMode(DecodeMode.LAZY_PER_ROW) - : Options.decodeMode(DecodeMode.DIRECT)); - ResultSet lazyPerRow = - connection.executeQuery(statement, Options.decodeMode(DecodeMode.LAZY_PER_ROW)); - ResultSet lazyPerCol = - connection.executeQuery(statement, Options.decodeMode(DecodeMode.LAZY_PER_COL))) { - while (direct.next() && lazyPerRow.next() && lazyPerCol.next()) { - assertEquals(direct.getColumnCount(), lazyPerRow.getColumnCount()); - assertEquals(direct.getColumnCount(), lazyPerCol.getColumnCount()); - for (int col = 0; col < direct.getColumnCount(); col++) { - // Test getting the entire row as a struct both as the first thing we do, and as the - // last thing we do. This ensures that the method works as expected both when a row - // is lazily decoded by this method, and when it has already been decoded by another - // method. 
- if (col % 2 == 0) { - assertEquals(direct.getCurrentRowAsStruct(), lazyPerRow.getCurrentRowAsStruct()); - assertEquals(direct.getCurrentRowAsStruct(), lazyPerCol.getCurrentRowAsStruct()); - } - assertEquals(direct.isNull(col), lazyPerRow.isNull(col)); - assertEquals(direct.isNull(col), lazyPerCol.isNull(col)); - assertEquals(direct.getValue(col), lazyPerRow.getValue(col)); - assertEquals(direct.getValue(col), lazyPerCol.getValue(col)); - if (col % 2 == 1) { - assertEquals(direct.getCurrentRowAsStruct(), lazyPerRow.getCurrentRowAsStruct()); - assertEquals(direct.getCurrentRowAsStruct(), lazyPerCol.getCurrentRowAsStruct()); + int receivedRows = 0; + // DecodeMode#DIRECT is not supported in read/write transactions, as the protobuf value + // is + // used for checksum calculation. + try (ResultSet direct = + connection.executeQuery( + statement, + !readonly && !autocommit + ? Options.decodeMode(DecodeMode.LAZY_PER_ROW) + : Options.decodeMode(DecodeMode.DIRECT)); + ResultSet lazyPerRow = + connection.executeQuery( + statement, Options.decodeMode(DecodeMode.LAZY_PER_ROW)); + ResultSet lazyPerCol = + connection.executeQuery( + statement, Options.decodeMode(DecodeMode.LAZY_PER_COL))) { + while (direct.next() && lazyPerRow.next() && lazyPerCol.next()) { + assertEquals(direct.getColumnCount(), lazyPerRow.getColumnCount()); + assertEquals(direct.getColumnCount(), lazyPerCol.getColumnCount()); + if (multiThreaded) { + ExecutorService service = Executors.newFixedThreadPool(direct.getColumnCount()); + List> futures = new ArrayList<>(direct.getColumnCount()); + for (int col = 0; col < direct.getColumnCount(); col++) { + final int colNumber = col; + futures.add( + service.submit( + () -> checkRowValues(colNumber, direct, lazyPerRow, lazyPerCol))); + } + service.shutdown(); + for (Future future : futures) { + future.get(); + } + } else { + for (int col = 0; col < direct.getColumnCount(); col++) { + checkRowValues(col, direct, lazyPerRow, lazyPerCol); + } } + receivedRows++; } - 
receivedRows++; + assertEquals(numRows, receivedRows); + } + if (!autocommit) { + connection.commit(); } - assertEquals(numRows, receivedRows); - } - if (!autocommit) { - connection.commit(); } } } } } + private void checkRowValues( + int col, ResultSet direct, ResultSet lazyPerRow, ResultSet lazyPerCol) { + // Randomly decode and get a column to trigger parallel decoding of one column. + lazyPerCol.getValue(ThreadLocalRandom.current().nextInt(lazyPerCol.getColumnCount())); + + // Test getting the entire row as a struct both as the first thing we do, and as the + // last thing we do. This ensures that the method works as expected both when a row + // is lazily decoded by this method, and when it has already been decoded by another + // method. + if (col % 2 == 0) { + assertEquals(direct.getCurrentRowAsStruct(), lazyPerRow.getCurrentRowAsStruct()); + assertEquals(direct.getCurrentRowAsStruct(), lazyPerCol.getCurrentRowAsStruct()); + } + assertEquals(direct.isNull(col), lazyPerRow.isNull(col)); + assertEquals(direct.isNull(col), lazyPerCol.isNull(col)); + assertEquals(direct.getValue(col), lazyPerRow.getValue(col)); + assertEquals(direct.getValue(col), lazyPerCol.getValue(col)); + if (col % 2 == 1) { + assertEquals(direct.getCurrentRowAsStruct(), lazyPerRow.getCurrentRowAsStruct()); + assertEquals(direct.getCurrentRowAsStruct(), lazyPerCol.getCurrentRowAsStruct()); + } + } + @Test public void testDecodeModeDirect_failsInReadWriteTransaction() { int numRows = 1; diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DmlBatchTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DmlBatchTest.java index 289683e34f5..629ae41daf4 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DmlBatchTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DmlBatchTest.java @@ -38,6 +38,7 @@ import 
com.google.cloud.spanner.connection.AbstractStatementParser.StatementType; import com.google.cloud.spanner.connection.UnitOfWork.CallType; import com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState; +import io.opentelemetry.api.trace.Span; import java.util.Arrays; import java.util.Collections; import org.junit.Test; @@ -65,6 +66,7 @@ private DmlBatch createSubject(UnitOfWork transaction) { return DmlBatch.newBuilder() .setTransaction(transaction) .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) .build(); } @@ -193,7 +195,7 @@ public void testGetStateAndIsActive() { public void testCommit() { DmlBatch batch = createSubject(); try { - batch.commitAsync(CallType.SYNC); + batch.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE); fail("Expected exception"); } catch (SpannerException e) { assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); @@ -204,7 +206,7 @@ public void testCommit() { public void testRollback() { DmlBatch batch = createSubject(); try { - batch.rollbackAsync(CallType.SYNC); + batch.rollbackAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE); fail("Expected exception"); } catch (SpannerException e) { assertEquals(ErrorCode.FAILED_PRECONDITION, e.getErrorCode()); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DurationConverterTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DurationConverterTest.java index 0c26f5b3b76..9e3c23cf5ce 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DurationConverterTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/DurationConverterTest.java @@ -18,14 +18,14 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.MatcherAssert.assertThat; +import 
static org.junit.Assert.assertNotNull; import com.google.cloud.spanner.Dialect; import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.DurationConverter; -import com.google.protobuf.Duration; +import java.time.Duration; import java.util.concurrent.TimeUnit; import org.junit.Test; import org.junit.runner.RunWith; @@ -38,49 +38,32 @@ public void testConvert() throws CompileException { String allowedValues = ReadOnlyStalenessConverterTest.getAllowedValues( DurationConverter.class, Dialect.GOOGLE_STANDARD_SQL); - assertThat(allowedValues, is(notNullValue())); + assertNotNull(allowedValues); DurationConverter converter = new DurationConverter(allowedValues); - assertThat( - converter.convert("'100ms'"), - is( - equalTo( - Duration.newBuilder() - .setNanos((int) TimeUnit.MILLISECONDS.toNanos(100L)) - .build()))); + assertThat(converter.convert("'100ms'"), is(equalTo(Duration.ofMillis(100L)))); + assertThat(converter.convert("100"), is(equalTo(Duration.ofMillis(100)))); assertThat(converter.convert("'0ms'"), is(nullValue())); assertThat(converter.convert("'-100ms'"), is(nullValue())); assertThat( - converter.convert("'315576000000000ms'"), - is(equalTo(Duration.newBuilder().setSeconds(315576000000L).build()))); - assertThat( - converter.convert("'1000ms'"), is(equalTo(Duration.newBuilder().setSeconds(1L).build()))); + converter.convert("'315576000000000ms'"), is(equalTo(Duration.ofSeconds(315576000000L)))); + assertThat(converter.convert("'1000ms'"), is(equalTo(Duration.ofSeconds(1L)))); assertThat( converter.convert("'1001ms'"), - is( - equalTo( - Duration.newBuilder() - .setSeconds(1L) - .setNanos((int) TimeUnit.MILLISECONDS.toNanos(1L)) - .build()))); + is(equalTo(Duration.ofSeconds(1L, TimeUnit.MILLISECONDS.toNanos(1L))))); - assertThat(converter.convert("'1ns'"), is(equalTo(Duration.newBuilder().setNanos(1).build()))); - assertThat( - 
converter.convert("'1us'"), is(equalTo(Duration.newBuilder().setNanos(1000).build()))); - assertThat( - converter.convert("'1ms'"), is(equalTo(Duration.newBuilder().setNanos(1000000).build()))); - assertThat( - converter.convert("'999999999ns'"), - is(equalTo(Duration.newBuilder().setNanos(999999999).build()))); - assertThat( - converter.convert("'1s'"), is(equalTo(Duration.newBuilder().setSeconds(1L).build()))); + assertThat(converter.convert("'1ns'"), is(equalTo(Duration.ofNanos(1)))); + assertThat(converter.convert("'1us'"), is(equalTo(Duration.ofNanos(1000)))); + assertThat(converter.convert("'1ms'"), is(equalTo(Duration.ofNanos(1000000)))); + assertThat(converter.convert("'999999999ns'"), is(equalTo(Duration.ofNanos(999999999)))); + assertThat(converter.convert("'1s'"), is(equalTo(Duration.ofSeconds(1L)))); assertThat(converter.convert("''"), is(nullValue())); assertThat(converter.convert("' '"), is(nullValue())); assertThat(converter.convert("'random string'"), is(nullValue())); - assertThat(converter.convert("null"), is(equalTo(Duration.getDefaultInstance()))); - assertThat(converter.convert("NULL"), is(equalTo(Duration.getDefaultInstance()))); - assertThat(converter.convert("Null"), is(equalTo(Duration.getDefaultInstance()))); + assertThat(converter.convert("null"), is(equalTo(Duration.ZERO))); + assertThat(converter.convert("NULL"), is(equalTo(Duration.ZERO))); + assertThat(converter.convert("Null"), is(equalTo(Duration.ZERO))); assertThat(converter.convert("'null'"), is(nullValue())); assertThat(converter.convert("'NULL'"), is(nullValue())); assertThat(converter.convert("'Null'"), is(nullValue())); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ExcludeTxnFromChangeStreamsMockServerTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ExcludeTxnFromChangeStreamsMockServerTest.java new file mode 100644 index 00000000000..5fc8fc7cfef --- /dev/null +++ 
b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ExcludeTxnFromChangeStreamsMockServerTest.java @@ -0,0 +1,236 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.common.collect.ImmutableList; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ExcludeTxnFromChangeStreamsMockServerTest extends AbstractMockServerTest { + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Test + public void testAutoCommit_includedByDefault() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.executeUpdate(INSERT_STATEMENT); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = 
mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertFalse(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testAutoCommitUpdate() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setExcludeTxnFromChangeStreams(true); + connection.executeUpdate(INSERT_STATEMENT); + + // Verify that the setting is reset after executing a transaction. + assertFalse(connection.isExcludeTxnFromChangeStreams()); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testAutoCommitBatchDml() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setExcludeTxnFromChangeStreams(true); + connection.executeBatchUpdate(ImmutableList.of(INSERT_STATEMENT, INSERT_STATEMENT)); + + assertFalse(connection.isExcludeTxnFromChangeStreams()); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testAutoCommitUpdateReturning() { + try (Connection connection = createConnection()) { + 
connection.setAutocommit(true); + connection.setExcludeTxnFromChangeStreams(true); + connection.executeQuery(INSERT_RETURNING_STATEMENT); + + assertFalse(connection.isExcludeTxnFromChangeStreams()); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testPartitionedDml() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + connection.setAutocommitDmlMode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC); + connection.setExcludeTxnFromChangeStreams(true); + connection.executeUpdate(INSERT_STATEMENT); + + // Verify that the setting is reset after executing a transaction. + assertFalse(connection.isExcludeTxnFromChangeStreams()); + } + + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + BeginTransactionRequest request = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class).get(0); + assertTrue(request.hasOptions()); + assertTrue(request.getOptions().hasPartitionedDml()); + assertTrue(request.getOptions().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testTransaction_includedByDefault() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.executeUpdate(INSERT_STATEMENT); + connection.commit(); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + 
assertFalse(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testTransactionUpdate() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setExcludeTxnFromChangeStreams(true); + connection.executeUpdate(INSERT_STATEMENT); + connection.commit(); + + // Verify that the setting is reset after executing a transaction. + assertFalse(connection.isExcludeTxnFromChangeStreams()); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testTransactionBatchDml() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setExcludeTxnFromChangeStreams(true); + connection.executeBatchUpdate(ImmutableList.of(INSERT_STATEMENT, INSERT_STATEMENT)); + connection.commit(); + + assertFalse(connection.isExcludeTxnFromChangeStreams()); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testTransactionUpdateReturning() { + try (Connection connection = createConnection()) { + connection.setAutocommit(false); + connection.setExcludeTxnFromChangeStreams(true); + connection.executeQuery(INSERT_RETURNING_STATEMENT); + connection.commit(); + + 
assertFalse(connection.isExcludeTxnFromChangeStreams()); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getTransaction().getBegin().getExcludeTxnFromChangeStreams()); + } + + @Test + public void testSqlStatements() { + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + + connection.execute(Statement.of("set exclude_txn_from_change_streams = true")); + assertTrue(connection.isExcludeTxnFromChangeStreams()); + + try (ResultSet resultSet = + connection + .execute(Statement.of("show variable exclude_txn_from_change_streams")) + .getResultSet()) { + assertTrue(resultSet.next()); + assertTrue(resultSet.getBoolean("EXCLUDE_TXN_FROM_CHANGE_STREAMS")); + assertFalse(resultSet.next()); + } + + connection.setAutocommit(false); + connection.execute(Statement.of("set exclude_txn_from_change_streams = true")); + assertTrue(connection.isExcludeTxnFromChangeStreams()); + connection.execute(INSERT_STATEMENT); + assertThrows( + SpannerException.class, + () -> connection.execute(Statement.of("set exclude_txn_from_change_streams=false"))); + } + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/NoopEndTransactionCallback.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/NoopEndTransactionCallback.java new file mode 100644 index 00000000000..6145d9770b5 --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/NoopEndTransactionCallback.java @@ -0,0 +1,31 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import com.google.cloud.spanner.connection.UnitOfWork.EndTransactionCallback; + +class NoopEndTransactionCallback implements EndTransactionCallback { + static final NoopEndTransactionCallback INSTANCE = new NoopEndTransactionCallback(); + + private NoopEndTransactionCallback() {} + + @Override + public void onSuccess() {} + + @Override + public void onFailure() {} +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/OpenTelemetryTracingTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/OpenTelemetryTracingTest.java new file mode 100644 index 00000000000..a8e579feb1a --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/OpenTelemetryTracingTest.java @@ -0,0 +1,681 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static com.google.cloud.spanner.connection.Repeat.twice; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.SpannerOptions.SpannerEnvironment; +import com.google.cloud.spanner.SpannerOptionsTestHelper; +import com.google.cloud.spanner.Statement; +import com.google.common.collect.ImmutableList; +import com.google.longrunning.Operation; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import io.opentelemetry.api.GlobalOpenTelemetry; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; +import io.opentelemetry.context.propagation.ContextPropagators; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.testing.exporter.InMemorySpanExporter; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.data.EventData; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.export.SimpleSpanProcessor; +import java.util.List; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class OpenTelemetryTracingTest extends AbstractMockServerTest { + private static InMemorySpanExporter spanExporter; + + private static OpenTelemetrySdk openTelemetry; + + 
@BeforeClass + public static void setupOpenTelemetry() { + SpannerOptionsTestHelper.resetActiveTracingFramework(); + SpannerOptions.enableOpenTelemetryTraces(); + GlobalOpenTelemetry.resetForTest(); + + spanExporter = InMemorySpanExporter.create(); + + SdkTracerProvider tracerProvider = + SdkTracerProvider.builder() + .addSpanProcessor(SimpleSpanProcessor.create(spanExporter)) + .build(); + + openTelemetry = + OpenTelemetrySdk.builder() + .setPropagators(ContextPropagators.create(W3CTraceContextPropagator.getInstance())) + .setTracerProvider(tracerProvider) + .buildAndRegisterGlobal(); + } + + @AfterClass + public static void closeOpenTelemetry() { + SpannerPool.closeSpannerPool(); + if (openTelemetry != null) { + openTelemetry.close(); + } + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + spanExporter.reset(); + } + + Connection createTestConnection() { + return createTestConnection(getBaseUrl() + ";enableExtendedTracing=true"); + } + + Connection createTestConnection(String url) { + return ConnectionOptions.newBuilder() + .setTracingPrefix("CloudSpannerJdbc") + .setUri(url) + .build() + .getConnection(); + } + + @Test + public void testSingleUseQuery_withoutSqlStatement() { + try (Connection connection = createTestConnection(getBaseUrl())) { + connection.setAutocommit(true); + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + // There are two query spans: One for getting the database dialect, and one for the actual + // test query. + assertEquals( + 2, + spans.stream() + .filter(span -> span.getName().equals("CloudSpannerOperation.ExecuteStreamingQuery")) + .count()); + // Verify that both do not have any db.statement attribute. 
+ assertEquals( + 2, + spans.stream() + .filter( + span -> + span.getName().equals("CloudSpannerOperation.ExecuteStreamingQuery") + && span.getAttributes().get(AttributeKey.stringKey("db.statement")) == null) + .count()); + } + + @Test + public void testSingleUseQuery_withoutSqlStatement_usingEnvVar() { + SpannerPool.closeSpannerPool(); + SpannerOptions.useEnvironment( + new SpannerEnvironment() { + @Override + public boolean isEnableExtendedTracing() { + return true; + } + }); + + try (Connection connection = createTestConnection(getBaseUrl())) { + connection.setAutocommit(true); + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + } finally { + SpannerOptions.useDefaultEnvironment(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.SingleUseTransaction", spans); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains( + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + SpanData executeQuerySpan = + getSpan( + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of( + AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql(), + AttributeKey.stringKey("thread.name"), Thread.currentThread().getName()), + spans); + + assertParent( + "CloudSpannerJdbc.SingleUseTransaction", "CloudSpanner.ReadOnlyTransaction", spans); + assertParent( + "CloudSpanner.ReadOnlyTransaction", + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + } + + @Test + public void testSingleUseQuery() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(true); + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + 
assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.SingleUseTransaction", spans); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains( + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + SpanData executeQuerySpan = + getSpan( + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of( + AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql(), + AttributeKey.stringKey("thread.name"), Thread.currentThread().getName()), + spans); + + assertParent( + "CloudSpannerJdbc.SingleUseTransaction", "CloudSpanner.ReadOnlyTransaction", spans); + assertParent( + "CloudSpanner.ReadOnlyTransaction", + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + } + + @Test + public void testSingleUseUpdate() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(true); + connection.executeUpdate(INSERT_STATEMENT); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.SingleUseTransaction", spans); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains( + "CloudSpannerOperation.ExecuteUpdate", + Attributes.of(AttributeKey.stringKey("db.statement"), INSERT_STATEMENT.getSql()), + spans); + assertContains("CloudSpannerOperation.Commit", spans); + + assertParent( + "CloudSpannerJdbc.SingleUseTransaction", "CloudSpanner.ReadWriteTransaction", spans); + assertParent( + "CloudSpanner.ReadWriteTransaction", + "CloudSpannerOperation.ExecuteUpdate", + Attributes.of(AttributeKey.stringKey("db.statement"), INSERT_STATEMENT.getSql()), + 
spans); + SpanData executeQuerySpan = + getSpan( + "CloudSpannerOperation.ExecuteUpdate", + Attributes.of( + AttributeKey.stringKey("db.statement"), INSERT_STATEMENT.getSql(), + AttributeKey.stringKey("thread.name"), Thread.currentThread().getName()), + spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + } + + @Test + public void testSingleUseBatchUpdate() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(true); + connection.startBatchDml(); + connection.executeUpdate(INSERT_STATEMENT); + connection.executeUpdate(INSERT_STATEMENT); + connection.runBatch(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.SingleUseTransaction", spans); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains( + "CloudSpannerOperation.BatchUpdate", + Attributes.of( + AttributeKey.stringArrayKey("db.statement"), + ImmutableList.of(INSERT_STATEMENT.getSql(), INSERT_STATEMENT.getSql())), + spans); + SpanData executeQuerySpan = + getSpan( + "CloudSpannerOperation.BatchUpdate", + Attributes.of( + AttributeKey.stringArrayKey("db.statement"), + ImmutableList.of(INSERT_STATEMENT.getSql(), INSERT_STATEMENT.getSql())), + spans); + String threadName = executeQuerySpan.getAttributes().get(AttributeKey.stringKey("thread.name")); + assertEquals(Thread.currentThread().getName(), threadName); + assertContains("CloudSpannerOperation.Commit", spans); + + assertParent( + "CloudSpannerJdbc.SingleUseTransaction", "CloudSpanner.ReadWriteTransaction", spans); + assertParent( + "CloudSpanner.ReadWriteTransaction", + "CloudSpannerOperation.BatchUpdate", + Attributes.of( + AttributeKey.stringArrayKey("db.statement"), + ImmutableList.of(INSERT_STATEMENT.getSql(), INSERT_STATEMENT.getSql())), + spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); 
+ } + + @Test + public void testSingleUseDdl() { + String ddl = "CREATE TABLE foo (id int64) PRIMARY KEY (id)"; + addUpdateDdlResponse(); + + try (Connection connection = createTestConnection()) { + connection.setAutocommit(true); + connection.execute(Statement.of(ddl)); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains( + "CloudSpannerJdbc.DdlStatement", + Attributes.of(AttributeKey.stringKey("db.statement"), ddl), + spans); + } + + @Test + public void testSingleUseDdlBatch() { + String ddl1 = "CREATE TABLE foo (id int64, value string(max)) PRIMARY KEY (id)"; + String ddl2 = "CREATE INDEX idx_foo ON foo (value)"; + addUpdateDdlResponse(); + + try (Connection connection = createTestConnection()) { + connection.setAutocommit(true); + connection.startBatchDdl(); + connection.execute(Statement.of(ddl1)); + connection.execute(Statement.of(ddl2)); + connection.runBatch(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains( + "CloudSpannerJdbc.DdlBatch", + Attributes.of(AttributeKey.stringArrayKey("db.statement"), ImmutableList.of(ddl1, ddl2)), + spans); + } + + @Test + public void testMultiUseReadOnlyQueries() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(true); + twice( + () -> { + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + }); + connection.commit(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.ReadOnlyTransaction", spans); + assertContains("CloudSpanner.ReadOnlyTransaction", spans); + assertContains( + "CloudSpannerOperation.ExecuteStreamingQuery", + 2, + 
Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + assertParent("CloudSpannerJdbc.ReadOnlyTransaction", "CloudSpanner.ReadOnlyTransaction", spans); + assertParent( + "CloudSpanner.ReadOnlyTransaction", + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + } + + @Test + public void testMultiUseReadWriteQueries() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + twice( + () -> { + try (ResultSet resultSet = connection.executeQuery(SELECT1_STATEMENT)) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + }); + connection.commit(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.ReadWriteTransaction", spans); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains( + "CloudSpannerOperation.ExecuteStreamingQuery", + 2, + Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertParent( + "CloudSpannerJdbc.ReadWriteTransaction", "CloudSpanner.ReadWriteTransaction", spans); + assertParent( + "CloudSpanner.ReadWriteTransaction", + "CloudSpannerOperation.ExecuteStreamingQuery", + Attributes.of(AttributeKey.stringKey("db.statement"), SELECT1_STATEMENT.getSql()), + spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + } + + @Test + public void testMultiUseReadWriteUpdates() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.commit(); + } + 
assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.ReadWriteTransaction", spans); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains( + "CloudSpannerOperation.ExecuteUpdate", + 2, + Attributes.of(AttributeKey.stringKey("db.statement"), INSERT_STATEMENT.getSql()), + spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertParent( + "CloudSpannerJdbc.ReadWriteTransaction", "CloudSpanner.ReadWriteTransaction", spans); + assertParent( + "CloudSpanner.ReadWriteTransaction", + "CloudSpannerOperation.ExecuteUpdate", + Attributes.of(AttributeKey.stringKey("db.statement"), INSERT_STATEMENT.getSql()), + spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + } + + @Test + public void testMultiUseReadWriteBatchUpdates() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + + twice( + () -> { + connection.startBatchDml(); + connection.executeUpdate(INSERT_STATEMENT); + connection.executeUpdate(INSERT_STATEMENT); + connection.runBatch(); + }); + + connection.commit(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.ReadWriteTransaction", spans); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + assertContains( + "CloudSpannerOperation.BatchUpdate", + 2, + Attributes.of( + AttributeKey.stringArrayKey("db.statement"), + ImmutableList.of(INSERT_STATEMENT.getSql(), INSERT_STATEMENT.getSql())), + spans); + assertContains("CloudSpannerOperation.Commit", spans); + assertParent( + "CloudSpannerJdbc.ReadWriteTransaction", "CloudSpanner.ReadWriteTransaction", spans); + assertParent( + "CloudSpanner.ReadWriteTransaction", + "CloudSpannerOperation.BatchUpdate", + Attributes.of( + 
AttributeKey.stringArrayKey("db.statement"), + ImmutableList.of(INSERT_STATEMENT.getSql(), INSERT_STATEMENT.getSql())), + spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + } + + @Test + public void testMultiUseReadWriteAborted() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + mockSpanner.abortNextStatement(); + connection.commit(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.ReadWriteTransaction", spans); + assertContains("CloudSpanner.ReadWriteTransaction", 1, Attributes.empty(), spans); + SpanData transactionSpan = + spans.stream() + .filter(span -> span.getName().equals("CloudSpannerJdbc.ReadWriteTransaction")) + .findFirst() + .orElseThrow(IllegalStateException::new); + assertEquals( + Boolean.TRUE, + transactionSpan.getAttributes().get(AttributeKey.booleanKey("transaction.retried"))); + assertEquals(1, transactionSpan.getTotalRecordedEvents()); + EventData event = transactionSpan.getEvents().get(0); + assertEquals( + "Transaction aborted. Backing off for 0 milliseconds and retrying.", event.getName()); + // The transaction is retried, so we get the ExecuteUpdate and Commit spans twice. 
+ assertContains( + "CloudSpannerOperation.ExecuteUpdate", + 2, + Attributes.of(AttributeKey.stringKey("db.statement"), INSERT_STATEMENT.getSql()), + spans); + assertContains("CloudSpannerOperation.Commit", 2, Attributes.empty(), spans); + + assertParent( + "CloudSpannerJdbc.ReadWriteTransaction", "CloudSpanner.ReadWriteTransaction", spans); + assertParent( + "CloudSpanner.ReadWriteTransaction", + "CloudSpannerOperation.ExecuteUpdate", + Attributes.of(AttributeKey.stringKey("db.statement"), INSERT_STATEMENT.getSql()), + spans); + assertParent("CloudSpanner.ReadWriteTransaction", "CloudSpannerOperation.Commit", spans); + } + + @Test + public void testSavepoint() { + Statement statement1 = Statement.of("insert into foo (id) values (1)"); + Statement statement2 = Statement.of("insert into foo (id) values (2)"); + mockSpanner.putStatementResult(MockSpannerServiceImpl.StatementResult.update(statement1, 1)); + mockSpanner.putStatementResult(MockSpannerServiceImpl.StatementResult.update(statement2, 1)); + + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + connection.setSavepointSupport(SavepointSupport.ENABLED); + assertEquals(1L, connection.executeUpdate(statement1)); + connection.savepoint("test"); + assertEquals(1L, connection.executeUpdate(statement2)); + connection.rollbackToSavepoint("test"); + connection.commit(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains("CloudSpannerJdbc.ReadWriteTransaction", spans); + assertContains("CloudSpanner.ReadWriteTransaction", spans); + // Statement 1 is executed 2 times, because the original transaction needs to be + // retried after the transaction was rolled back to the savepoint. 
+ assertContains( + "CloudSpannerOperation.ExecuteUpdate", + 2, + Attributes.of(AttributeKey.stringKey("db.statement"), statement1.getSql()), + spans); + assertContains( + "CloudSpannerOperation.ExecuteUpdate", + 1, + Attributes.of(AttributeKey.stringKey("db.statement"), statement2.getSql()), + spans); + assertContains("CloudSpannerOperation.Commit", spans); + + // Verify that we have two Cloud Spanner transactions, and that these are both children of one + // JDBC transaction. + List transactionSpans = + getSpans("CloudSpanner.ReadWriteTransaction", Attributes.empty(), spans); + assertEquals(2, transactionSpans.size()); + assertEquals( + transactionSpans.get(0).getParentSpanId(), transactionSpans.get(1).getParentSpanId()); + List jdbcTransactionSpans = + getSpans("CloudSpannerJdbc.ReadWriteTransaction", Attributes.empty(), spans); + assertEquals(1, jdbcTransactionSpans.size()); + assertEquals( + jdbcTransactionSpans.get(0).getSpanId(), transactionSpans.get(0).getParentSpanId()); + List commitSpans = + getSpans("CloudSpannerOperation.Commit", Attributes.empty(), spans); + assertEquals(1, commitSpans.size()); + assertEquals(transactionSpans.get(1).getSpanId(), commitSpans.get(0).getParentSpanId()); + } + + @Test + public void testTransactionTag() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + connection.setTransactionTag("my_tag"); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.commit(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains( + "CloudSpanner.ReadWriteTransaction", + 1, + Attributes.of(AttributeKey.stringKey("transaction.tag"), "my_tag"), + spans); + } + + @Test + public void testStatementTag() { + try (Connection connection = createTestConnection()) { + connection.setAutocommit(false); + connection.setReadOnly(false); + 
connection.setStatementTag("my_tag"); + assertEquals(1L, connection.executeUpdate(INSERT_STATEMENT)); + connection.commit(); + } + assertEquals(CompletableResultCode.ofSuccess(), spanExporter.flush()); + List spans = spanExporter.getFinishedSpanItems(); + assertContains( + "CloudSpannerOperation.ExecuteUpdate", + 1, + Attributes.of(AttributeKey.stringKey("statement.tag"), "my_tag"), + spans); + } + + void assertContains(String expected, List spans) { + assertTrue( + "Expected " + spansToString(spans) + " to contain " + expected, + spans.stream().anyMatch(span -> span.getName().equals(expected))); + } + + void assertContains(String expected, Attributes attributes, List spans) { + assertContains(expected, 1, attributes, spans); + } + + void assertContains(String expected, int count, Attributes attributes, List spans) { + assertEquals( + "Expected " + spansToString(spans) + " to contain " + expected, + count, + spans.stream().filter(span -> equalsSpan(span, expected, attributes)).count()); + } + + boolean equalsSpan(SpanData span, String name, Attributes attributes) { + if (!span.getName().equals(name)) { + return false; + } + for (Entry, Object> entry : attributes.asMap().entrySet()) { + if (!span.getAttributes().asMap().containsKey(entry.getKey())) { + return false; + } + if (!Objects.equals(entry.getValue(), span.getAttributes().get(entry.getKey()))) { + return false; + } + } + return true; + } + + void assertParent(String expectedParent, String child, List spans) { + SpanData parentSpan = getSpan(expectedParent, spans); + SpanData childSpan = getSpan(child, spans); + assertEquals(parentSpan.getSpanId(), childSpan.getParentSpanId()); + } + + void assertParent( + String expectedParent, String child, Attributes attributes, List spans) { + SpanData parentSpan = getSpan(expectedParent, spans); + List childSpans = getSpans(child, attributes, spans); + for (SpanData childSpan : childSpans) { + assertEquals(parentSpan.getSpanId(), childSpan.getParentSpanId()); + } + } + + 
SpanData getSpan(String name, List spans) { + return spans.stream() + .filter(span -> span.getName().equals(name)) + .findAny() + .orElseThrow(() -> new IllegalArgumentException("Span " + name + " not found")); + } + + SpanData getSpan(String name, Attributes attributes, List spans) { + return spans.stream() + .filter(span -> equalsSpan(span, name, attributes)) + .findAny() + .orElseThrow(() -> new IllegalArgumentException("Span " + name + " not found")); + } + + List getSpans(String name, Attributes attributes, List spans) { + return spans.stream() + .filter(span -> equalsSpan(span, name, attributes)) + .collect(Collectors.toList()); + } + + private String spansToString(List spans) { + return spans.stream().map(SpanData::getName).collect(Collectors.joining("\n", "\n", "\n")); + } + + private void addUpdateDdlResponse() { + mockDatabaseAdmin.addResponse( + Operation.newBuilder() + .setMetadata( + Any.pack( + UpdateDatabaseDdlMetadata.newBuilder() + .setDatabase("projects/proj/instances/inst/databases/db") + .build())) + .setName("projects/proj/instances/inst/databases/db/operations/1") + .setDone(true) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .build()); + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PartitionedQueryMockServerTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PartitionedQueryMockServerTest.java index 95bb4fb8fa0..655ca0de586 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PartitionedQueryMockServerTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PartitionedQueryMockServerTest.java @@ -417,6 +417,94 @@ public void testRunEmptyPartitionedQuery() { assertEquals(1, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); } + @Test + public void testGetMetadataWithoutNextCall() { + int generatedRowCount = 1; + RandomResultSetGenerator generator = new 
RandomResultSetGenerator(generatedRowCount); + Statement statement = + Statement.newBuilder("select * from random_table where active=@active") + .bind("active") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + + int maxPartitions = 1; + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + try (PartitionedQueryResultSet resultSet = + connection.runPartitionedQuery( + statement, PartitionOptions.newBuilder().setMaxPartitions(maxPartitions).build())) { + assertNotNull(resultSet.getMetadata()); + assertEquals(24, resultSet.getMetadata().getRowType().getFieldsCount()); + assertNotNull(resultSet.getType()); + assertEquals(24, resultSet.getType().getStructFields().size()); + + assertTrue(resultSet.next()); + assertNotNull(resultSet.getMetadata()); + assertEquals(24, resultSet.getMetadata().getRowType().getFieldsCount()); + assertNotNull(resultSet.getType()); + assertEquals(24, resultSet.getType().getStructFields().size()); + + assertFalse(resultSet.next()); + } + } + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } + + @Test + public void testGetMetadataWithoutNextCallOnEmptyResultSet() { + int generatedRowCount = 0; + RandomResultSetGenerator generator = new RandomResultSetGenerator(generatedRowCount); + Statement statement = + Statement.newBuilder("select * from random_table where active=@active") + .bind("active") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + + int maxPartitions = 1; + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + try (PartitionedQueryResultSet resultSet = + connection.runPartitionedQuery( + statement, PartitionOptions.newBuilder().setMaxPartitions(maxPartitions).build())) { + assertNotNull(resultSet.getMetadata()); + 
assertEquals(24, resultSet.getMetadata().getRowType().getFieldsCount()); + assertNotNull(resultSet.getType()); + assertEquals(24, resultSet.getType().getStructFields().size()); + + assertFalse(resultSet.next()); + } + } + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } + + @Test + public void testGetMetadataWithoutNextCallOnResultSetWithError() { + Statement statement = + Statement.newBuilder("select * from random_table where active=@active") + .bind("active") + .to(true) + .build(); + mockSpanner.putStatementResult( + StatementResult.exception(statement, Status.NOT_FOUND.asRuntimeException())); + + int maxPartitions = 1; + try (Connection connection = createConnection()) { + connection.setAutocommit(true); + try (PartitionedQueryResultSet resultSet = + connection.runPartitionedQuery( + statement, PartitionOptions.newBuilder().setMaxPartitions(maxPartitions).build())) { + assertThrows(SpannerException.class, resultSet::getMetadata); + assertThrows(SpannerException.class, resultSet::getType); + } + } + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + assertEquals(1, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } + @Test public void testRunPartitionedQueryUsingSql() { int generatedRowCount = 20; diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PgDurationConverterTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PgDurationConverterTest.java index b3b0ff1fff5..95bd97962a8 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PgDurationConverterTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/PgDurationConverterTest.java @@ -23,7 +23,7 @@ import com.google.cloud.spanner.Dialect; import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; 
import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.PgDurationConverter; -import com.google.protobuf.Duration; +import java.time.Duration; import java.util.concurrent.TimeUnit; import org.junit.Test; import org.junit.runner.RunWith; @@ -39,41 +39,33 @@ public void testConvert() throws CompileException { assertNotNull(allowedValues); PgDurationConverter converter = new PgDurationConverter(allowedValues); - assertEquals(Duration.newBuilder().setNanos(1000000).build(), converter.convert("1")); - assertEquals(Duration.newBuilder().setSeconds(1L).build(), converter.convert("1000")); - assertEquals( - Duration.newBuilder().setSeconds(1L).setNanos(1000000).build(), converter.convert("1001")); + assertEquals(Duration.ofNanos(1000000), converter.convert("1")); + assertEquals(Duration.ofSeconds(1L), converter.convert("1000")); + assertEquals(Duration.ofSeconds(1L, 1000000), converter.convert("1001")); assertEquals( - Duration.newBuilder().setNanos((int) TimeUnit.MILLISECONDS.toNanos(100L)).build(), - converter.convert("'100ms'")); + Duration.ofNanos((int) TimeUnit.MILLISECONDS.toNanos(100L)), converter.convert("'100ms'")); assertNull(converter.convert("'0ms'")); assertNull(converter.convert("'-100ms'")); + assertEquals(Duration.ofSeconds(315576000000L), converter.convert("'315576000000000ms'")); + assertEquals(Duration.ofSeconds(1L), converter.convert("'1s'")); assertEquals( - Duration.newBuilder().setSeconds(315576000000L).build(), - converter.convert("'315576000000000ms'")); - assertEquals(Duration.newBuilder().setSeconds(1L).build(), converter.convert("'1s'")); - assertEquals( - Duration.newBuilder() - .setSeconds(1L) - .setNanos((int) TimeUnit.MILLISECONDS.toNanos(1L)) - .build(), + Duration.ofSeconds(1L, (int) TimeUnit.MILLISECONDS.toNanos(1L)), converter.convert("'1001ms'")); - assertEquals(Duration.newBuilder().setNanos(1).build(), converter.convert("'1ns'")); - assertEquals(Duration.newBuilder().setNanos(1000).build(), 
converter.convert("'1us'")); - assertEquals(Duration.newBuilder().setNanos(1000000).build(), converter.convert("'1ms'")); - assertEquals( - Duration.newBuilder().setNanos(999999999).build(), converter.convert("'999999999ns'")); - assertEquals(Duration.newBuilder().setSeconds(1L).build(), converter.convert("'1s'")); + assertEquals(Duration.ofNanos(1), converter.convert("'1ns'")); + assertEquals(Duration.ofNanos(1000), converter.convert("'1us'")); + assertEquals(Duration.ofNanos(1000000), converter.convert("'1ms'")); + assertEquals(Duration.ofNanos(999999999), converter.convert("'999999999ns'")); + assertEquals(Duration.ofSeconds(1L), converter.convert("'1s'")); assertNull(converter.convert("''")); assertNull(converter.convert("' '")); assertNull(converter.convert("'random string'")); - assertEquals(Duration.getDefaultInstance(), converter.convert("default")); - assertEquals(Duration.getDefaultInstance(), converter.convert("DEFAULT")); - assertEquals(Duration.getDefaultInstance(), converter.convert("Default")); + assertEquals(Duration.ZERO, converter.convert("default")); + assertEquals(Duration.ZERO, converter.convert("DEFAULT")); + assertEquals(Duration.ZERO, converter.convert("Default")); assertNull(converter.convert("'default'")); assertNull(converter.convert("'DEFAULT'")); assertNull(converter.convert("'Default'")); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ProtoDescriptorsConverterTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ProtoDescriptorsConverterTest.java new file mode 100644 index 00000000000..766bc987d37 --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ProtoDescriptorsConverterTest.java @@ -0,0 +1,64 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.ProtoDescriptorsConverter; +import com.google.common.io.ByteStreams; +import java.io.InputStream; +import java.util.Base64; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ProtoDescriptorsConverterTest { + @Test + public void testConvert() throws CompileException { + String allowedValues = + ReadOnlyStalenessConverterTest.getAllowedValues( + ProtoDescriptorsConverter.class, Dialect.GOOGLE_STANDARD_SQL); + assertNotNull(allowedValues); + ProtoDescriptorsConverter converter = new ProtoDescriptorsConverter(allowedValues); + + byte[] protoDescriptors; + try { + InputStream in = + ProtoDescriptorsConverterTest.class + .getClassLoader() + .getResourceAsStream("com/google/cloud/spanner/descriptors.pb"); + assertNotNull(in); + protoDescriptors = ByteStreams.toByteArray(in); + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + + assertNull(converter.convert("")); + assertNull(converter.convert("null")); + assertNull(converter.convert(null)); + assertNull(converter.convert("random 
string")); + + assertArrayEquals( + converter.convert(Base64.getEncoder().encodeToString(protoDescriptors)), protoDescriptors); + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ProtoDescriptorsFileConverterTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ProtoDescriptorsFileConverterTest.java new file mode 100644 index 00000000000..29e4a2b591b --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ProtoDescriptorsFileConverterTest.java @@ -0,0 +1,46 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; +import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.ProtoDescriptorsFileConverter; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ProtoDescriptorsFileConverterTest { + @Test + public void testConvert() throws CompileException { + String allowedValues = + ReadOnlyStalenessConverterTest.getAllowedValues( + ProtoDescriptorsFileConverter.class, Dialect.GOOGLE_STANDARD_SQL); + assertNotNull(allowedValues); + ProtoDescriptorsFileConverter converter = new ProtoDescriptorsFileConverter(allowedValues); + + assertNull(converter.convert("")); + assertNull(converter.convert(null)); + + String filePath = "com/google/cloud/spanner/descriptors.pb"; + assertEquals(converter.convert(filePath), filePath); + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/QueryOptionsTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/QueryOptionsTest.java index 627e8c7acc9..6d5d4106638 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/QueryOptionsTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/QueryOptionsTest.java @@ -30,7 +30,7 @@ public class QueryOptionsTest extends AbstractMockServerTest { @Test public void testUseOptimizerVersionFromConnectionUrl() { - try (Connection connection = createConnection("?optimizerVersion=10")) { + try (Connection connection = createConnection(";optimizerVersion=10")) { Repeat.twice( () -> { executeSelect1AndConsumeResults(connection); diff --git 
a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyTransactionTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyTransactionTest.java index c1419565b72..e243fbd620a 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyTransactionTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadOnlyTransactionTest.java @@ -50,6 +50,7 @@ import com.google.cloud.spanner.connection.UnitOfWork.CallType; import com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState; import com.google.spanner.v1.ResultSetStats; +import io.opentelemetry.api.trace.Span; import java.util.Arrays; import java.util.Calendar; import java.util.List; @@ -180,6 +181,7 @@ private ReadOnlyTransaction createSubject(TimestampBound staleness) { .setBatchClient(mock(BatchClient.class)) .setReadOnlyStaleness(staleness) .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) .build(); } @@ -243,7 +245,7 @@ public void testAbortBatch() { @Test public void testGetCommitTimestamp() { ReadOnlyTransaction transaction = createSubject(); - get(transaction.commitAsync(CallType.SYNC)); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); assertThat(transaction.getState(), is(UnitOfWorkState.COMMITTED)); try { transaction.getCommitTimestamp(); @@ -256,7 +258,7 @@ public void testGetCommitTimestamp() { @Test public void testGetCommitResponse() { ReadOnlyTransaction transaction = createSubject(); - get(transaction.commitAsync(CallType.SYNC)); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); try { transaction.getCommitResponse(); fail("expected FAILED_PRECONDITION"); @@ -268,7 +270,7 @@ public void testGetCommitResponse() { @Test public void testGetCommitResponseOrNull() { ReadOnlyTransaction transaction = createSubject(); - get(transaction.commitAsync(CallType.SYNC)); + 
get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); assertNull(transaction.getCommitResponseOrNull()); } @@ -320,6 +322,7 @@ public void testExecuteQueryWithOptionsTest() { .setBatchClient(mock(BatchClient.class)) .setReadOnlyStaleness(TimestampBound.strong()) .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) .build(); ResultSet expectedWithOptions = DirectExecuteResultSet.ofResultSet(resWithOptions); assertThat( @@ -427,7 +430,7 @@ public void testState() { transaction.getState(), is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); assertThat(transaction.isActive(), is(true)); - get(transaction.commitAsync(CallType.SYNC)); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); assertThat( transaction.getState(), is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.COMMITTED))); @@ -446,7 +449,7 @@ public void testState() { is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); assertThat(transaction.isActive(), is(true)); - get(transaction.commitAsync(CallType.SYNC)); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); assertThat( transaction.getState(), is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.COMMITTED))); @@ -458,7 +461,7 @@ public void testState() { transaction.getState(), is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); assertThat(transaction.isActive(), is(true)); - get(transaction.rollbackAsync(CallType.SYNC)); + get(transaction.rollbackAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); assertThat( transaction.getState(), is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.ROLLED_BACK))); @@ -476,7 +479,7 @@ public void testState() { transaction.getState(), is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); 
assertThat(transaction.isActive(), is(true)); - get(transaction.rollbackAsync(CallType.SYNC)); + get(transaction.rollbackAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); assertThat( transaction.getState(), is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.ROLLED_BACK))); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadWriteTransactionTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadWriteTransactionTest.java index 8e643cf6e24..9fbb5b5bf16 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadWriteTransactionTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/ReadWriteTransactionTest.java @@ -62,6 +62,7 @@ import io.grpc.Metadata; import io.grpc.StatusRuntimeException; import io.grpc.protobuf.ProtoUtils; +import io.opentelemetry.api.trace.Span; import java.math.BigDecimal; import java.util.Arrays; import java.util.Collections; @@ -176,6 +177,7 @@ private ReadWriteTransaction createSubject( .setSavepointSupport(SavepointSupport.FAIL_AFTER_ROLLBACK) .setTransactionRetryListeners(Collections.emptyList()) .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) .build(); } @@ -306,7 +308,7 @@ public void testGetCommitTimestampAfterCommit() { ReadWriteTransaction transaction = createSubject(); assertThat(get(transaction.executeUpdateAsync(CallType.SYNC, parsedStatement)), is(1L)); - get(transaction.commitAsync(CallType.SYNC)); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); assertThat(transaction.getCommitTimestamp(), is(notNullValue())); } @@ -352,7 +354,7 @@ public void testState() { is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); assertThat(transaction.isActive(), is(true)); - get(transaction.commitAsync(CallType.SYNC)); + get(transaction.commitAsync(CallType.SYNC, 
NoopEndTransactionCallback.INSTANCE)); assertThat( transaction.getState(), is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.COMMITTED))); @@ -364,7 +366,7 @@ public void testState() { transaction.getState(), is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); assertThat(transaction.isActive(), is(true)); - get(transaction.rollbackAsync(CallType.SYNC)); + get(transaction.rollbackAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); assertThat( transaction.getState(), is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.ROLLED_BACK))); @@ -377,7 +379,7 @@ public void testState() { is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); assertThat(transaction.isActive(), is(true)); try { - get(transaction.commitAsync(CallType.SYNC)); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); } catch (SpannerException e) { // ignore } @@ -393,7 +395,7 @@ public void testState() { is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); assertThat(transaction.isActive(), is(true)); try { - get(transaction.commitAsync(CallType.SYNC)); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); } catch (AbortedException e) { // ignore } @@ -409,7 +411,7 @@ public void testState() { transaction.getState(), is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.STARTED))); assertThat(transaction.isActive(), is(true)); - get(transaction.commitAsync(CallType.SYNC)); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); assertThat( transaction.getState(), is(equalTo(com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState.COMMITTED))); @@ -475,12 +477,13 @@ public void testRetry() { .setTransactionRetryListeners(Collections.emptyList()) .setDatabaseClient(client) .withStatementExecutor(new StatementExecutor()) + 
.setSpan(Span.getInvalid()) .build(); subject.executeUpdateAsync(CallType.SYNC, update1); subject.executeUpdateAsync(CallType.SYNC, update2); boolean expectedException = false; try { - get(subject.commitAsync(CallType.SYNC)); + get(subject.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); } catch (SpannerException e) { if (results == RetryResults.DIFFERENT && e.getErrorCode() == ErrorCode.ABORTED) { // expected @@ -503,6 +506,7 @@ public void testChecksumResultSet() { .setTransactionRetryListeners(Collections.emptyList()) .setDatabaseClient(client) .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) .build(); ParsedStatement parsedStatement = mock(ParsedStatement.class); Statement statement = Statement.of("SELECT * FROM FOO"); @@ -737,6 +741,7 @@ public void testChecksumResultSetWithArray() { .setTransactionRetryListeners(Collections.emptyList()) .setDatabaseClient(client) .withStatementExecutor(new StatementExecutor()) + .setSpan(Span.getInvalid()) .build(); ParsedStatement parsedStatement = mock(ParsedStatement.class); Statement statement = Statement.of("SELECT * FROM FOO"); @@ -821,7 +826,7 @@ public void testGetCommitResponseAfterCommit() { ReadWriteTransaction transaction = createSubject(); get(transaction.executeUpdateAsync(CallType.SYNC, parsedStatement)); - get(transaction.commitAsync(CallType.SYNC)); + get(transaction.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE)); assertNotNull(transaction.getCommitResponse()); assertNotNull(transaction.getCommitResponseOrNull()); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RpcPriorityConverterTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RpcPriorityConverterTest.java index 50436bc8678..84fd326d08a 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RpcPriorityConverterTest.java +++ 
b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/RpcPriorityConverterTest.java @@ -19,9 +19,9 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; +import com.google.cloud.spanner.Options.RpcPriority; import com.google.cloud.spanner.connection.ClientSideStatementImpl.CompileException; import com.google.cloud.spanner.connection.ClientSideStatementValueConverters.RpcPriorityConverter; -import com.google.spanner.v1.RequestOptions.Priority; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @@ -34,18 +34,18 @@ public void testConvert() throws CompileException { String allowedValues = "'(HIGH|MEDIUM|LOW|NULL)'"; RpcPriorityConverter converter = new ClientSideStatementValueConverters.RpcPriorityConverter(allowedValues); - assertEquals(Priority.PRIORITY_HIGH, converter.convert("high")); - assertEquals(Priority.PRIORITY_HIGH, converter.convert("HIGH")); - assertEquals(Priority.PRIORITY_HIGH, converter.convert("High")); + assertEquals(RpcPriority.HIGH, converter.convert("high")); + assertEquals(RpcPriority.HIGH, converter.convert("HIGH")); + assertEquals(RpcPriority.HIGH, converter.convert("High")); - assertEquals(Priority.PRIORITY_MEDIUM, converter.convert("medium")); - assertEquals(Priority.PRIORITY_MEDIUM, converter.convert("Medium")); + assertEquals(RpcPriority.MEDIUM, converter.convert("medium")); + assertEquals(RpcPriority.MEDIUM, converter.convert("Medium")); - assertEquals(Priority.PRIORITY_LOW, converter.convert("Low")); + assertEquals(RpcPriority.LOW, converter.convert("Low")); assertNull(converter.convert("")); assertNull(converter.convert(" ")); assertNull(converter.convert("random string")); - assertEquals(Priority.PRIORITY_UNSPECIFIED, converter.convert("NULL")); + assertEquals(RpcPriority.UNSPECIFIED, converter.convert("NULL")); } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SavepointMockServerTest.java 
b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SavepointMockServerTest.java index e2ab63b3213..31972481629 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SavepointMockServerTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SavepointMockServerTest.java @@ -39,6 +39,7 @@ import com.google.spanner.v1.RollbackRequest; import java.util.Collection; import java.util.List; +import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; import org.junit.After; import org.junit.Before; @@ -678,4 +679,60 @@ public void testRollbackToSavepointWithoutInternalRetriesInReadOnlyTransaction() } } } + + @Test + public void testKeepAlive() throws InterruptedException, TimeoutException { + String keepAliveTag = "test_keep_alive_tag"; + System.setProperty("spanner.connection.keep_alive_interval_millis", "1"); + System.setProperty("spanner.connection.keep_alive_query_tag", keepAliveTag); + try (Connection connection = createConnection()) { + connection.setSavepointSupport(SavepointSupport.ENABLED); + connection.setKeepTransactionAlive(true); + // Start a transaction by executing a statement. + connection.execute(INSERT_STATEMENT); + // Verify that we get a keep-alive request. + verifyHasKeepAliveRequest(keepAliveTag); + // Set a savepoint, execute another statement, and rollback to the savepoint. + // The keep-alive should not be sent after the transaction has been rolled back to the + // savepoint. + connection.savepoint("s1"); + connection.execute(INSERT_STATEMENT); + connection.rollbackToSavepoint("s1"); + mockSpanner.waitForRequestsToContain(RollbackRequest.class, 1000L); + String keepAliveTagAfterRollback = "test_keep_alive_tag_after_rollback"; + System.setProperty("spanner.connection.keep_alive_query_tag", keepAliveTagAfterRollback); + + // Verify that we don't get any new keep-alive requests from this point. 
+ Thread.sleep(2L); + assertEquals(0, countKeepAliveRequest(keepAliveTagAfterRollback)); + // Resume the transaction and verify that we get a keep-alive again. + connection.execute(INSERT_STATEMENT); + verifyHasKeepAliveRequest(keepAliveTagAfterRollback); + } finally { + System.clearProperty("spanner.connection.keep_alive_interval_millis"); + System.clearProperty("spanner.connection.keep_alive_query_tag"); + } + } + + private void verifyHasKeepAliveRequest(String tag) throws InterruptedException, TimeoutException { + mockSpanner.waitForRequestsToContain( + r -> { + if (!(r instanceof ExecuteSqlRequest)) { + return false; + } + ExecuteSqlRequest request = (ExecuteSqlRequest) r; + return request.getSql().equals("SELECT 1") + && request.getRequestOptions().getRequestTag().equals(tag); + }, + 1000L); + } + + private long countKeepAliveRequest(String tag) { + return mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter( + request -> + request.getSql().equals("SELECT 1") + && request.getRequestOptions().getRequestTag().equals(tag)) + .count(); + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SavepointTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SavepointTest.java index e5ec5c9478c..1ea76845c40 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SavepointTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SavepointTest.java @@ -24,6 +24,7 @@ import com.google.cloud.spanner.SpannerException; import com.google.cloud.spanner.connection.AbstractMultiUseTransaction.Savepoint; import com.google.common.collect.ImmutableList; +import io.opentelemetry.api.trace.Span; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @@ -32,7 +33,10 @@ public class SavepointTest { static class TestTransaction extends ReadOnlyTransaction { TestTransaction() { - 
super(ReadOnlyTransaction.newBuilder().withStatementExecutor(mock(StatementExecutor.class))); + super( + ReadOnlyTransaction.newBuilder() + .setSpan(Span.getInvalid()) + .withStatementExecutor(mock(StatementExecutor.class))); } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SingleUseTransactionTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SingleUseTransactionTest.java index 60a5ede129a..6edf46b5623 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SingleUseTransactionTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SingleUseTransactionTest.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.fail; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; @@ -57,8 +58,11 @@ import com.google.cloud.spanner.connection.StatementExecutor.StatementTimeout; import com.google.cloud.spanner.connection.UnitOfWork.CallType; import com.google.common.base.Preconditions; +import com.google.common.io.ByteStreams; import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; import com.google.spanner.v1.ResultSetStats; +import io.opentelemetry.api.trace.Span; +import java.io.InputStream; import java.util.Arrays; import java.util.Calendar; import java.util.Collections; @@ -300,8 +304,8 @@ private DdlClient createDefaultMockDdlClient() { final OperationFuture operation = mock(OperationFuture.class); when(operation.get()).thenReturn(null); - when(ddlClient.executeDdl(anyString())).thenCallRealMethod(); - when(ddlClient.executeDdl(anyList())).thenReturn(operation); + when(ddlClient.executeDdl(anyString(), any())).thenCallRealMethod(); + when(ddlClient.executeDdl(anyList(), any())).thenReturn(operation); return ddlClient; } catch 
(Exception e) { throw new RuntimeException(e); @@ -315,7 +319,8 @@ private SingleUseTransaction createSubject() { TimestampBound.strong(), AutocommitDmlMode.TRANSACTIONAL, CommitBehavior.SUCCEED, - 0L); + 0L, + null); } private SingleUseTransaction createSubject(AutocommitDmlMode dmlMode) { @@ -325,7 +330,8 @@ private SingleUseTransaction createSubject(AutocommitDmlMode dmlMode) { TimestampBound.strong(), dmlMode, CommitBehavior.SUCCEED, - 0L); + 0L, + null); } private SingleUseTransaction createSubject(CommitBehavior commitBehavior) { @@ -335,7 +341,8 @@ private SingleUseTransaction createSubject(CommitBehavior commitBehavior) { TimestampBound.strong(), AutocommitDmlMode.TRANSACTIONAL, commitBehavior, - 0L); + 0L, + null); } private SingleUseTransaction createDdlSubject(DdlClient ddlClient) { @@ -345,7 +352,20 @@ private SingleUseTransaction createDdlSubject(DdlClient ddlClient) { TimestampBound.strong(), AutocommitDmlMode.TRANSACTIONAL, CommitBehavior.SUCCEED, - 0L); + 0L, + null); + } + + private SingleUseTransaction createProtoDescriptorsSubject( + DdlClient ddlClient, byte[] protoDescriptors) { + return createSubject( + ddlClient, + false, + TimestampBound.strong(), + AutocommitDmlMode.TRANSACTIONAL, + CommitBehavior.SUCCEED, + 0L, + protoDescriptors); } private SingleUseTransaction createReadOnlySubject(TimestampBound staleness) { @@ -355,7 +375,8 @@ private SingleUseTransaction createReadOnlySubject(TimestampBound staleness) { staleness, AutocommitDmlMode.TRANSACTIONAL, CommitBehavior.SUCCEED, - 0L); + 0L, + null); } private SingleUseTransaction createSubject( @@ -364,7 +385,8 @@ private SingleUseTransaction createSubject( TimestampBound staleness, AutocommitDmlMode dmlMode, final CommitBehavior commitBehavior, - long timeout) { + long timeout, + byte[] protoDescriptors) { DatabaseClient dbClient = mock(DatabaseClient.class); com.google.cloud.spanner.ReadOnlyTransaction singleUse = new SimpleReadOnlyTransaction(staleness); @@ -452,6 +474,8 @@ public 
TransactionRunner allowNestedTransaction() { .setStatementTimeout( timeout == 0L ? nullTimeout() : timeout(timeout, TimeUnit.MILLISECONDS)) .withStatementExecutor(executor) + .setSpan(Span.getInvalid()) + .setProtoDescriptors(protoDescriptors) .build(); } @@ -492,7 +516,7 @@ private List getTestTimestampBounds() { public void testCommit() { SingleUseTransaction subject = createSubject(); try { - subject.commitAsync(CallType.SYNC); + subject.commitAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE); fail("missing expected exception"); } catch (SpannerException e) { assertThat(e.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); @@ -503,7 +527,7 @@ public void testCommit() { public void testRollback() { SingleUseTransaction subject = createSubject(); try { - subject.rollbackAsync(CallType.SYNC); + subject.rollbackAsync(CallType.SYNC, NoopEndTransactionCallback.INSTANCE); fail("missing expected exception"); } catch (SpannerException e) { assertThat(e.getErrorCode()).isEqualTo(ErrorCode.FAILED_PRECONDITION); @@ -539,7 +563,34 @@ public void testExecuteDdl() { DdlClient ddlClient = createDefaultMockDdlClient(); SingleUseTransaction subject = createDdlSubject(ddlClient); get(subject.executeDdlAsync(CallType.SYNC, ddl)); - verify(ddlClient).executeDdl(sql); + verify(ddlClient).executeDdl(sql, null); + } + + @Test + public void testExecuteDdlWithProtoDescriptors() { + String sql = "CREATE TABLE FOO"; + ParsedStatement ddl = createParsedDdl(sql); + DdlClient ddlClient = createDefaultMockDdlClient(); + // verify when protoDescriptors value is null + SingleUseTransaction subject = createProtoDescriptorsSubject(ddlClient, null); + get(subject.executeDdlAsync(CallType.SYNC, ddl)); + verify(ddlClient).executeDdl(sql, null); + + // verify when protoDescriptors value is not null + byte[] protoDescriptors; + try { + InputStream in = + SingleUseTransactionTest.class + .getClassLoader() + .getResourceAsStream("com/google/cloud/spanner/descriptors.pb"); + 
assertNotNull(in); + protoDescriptors = ByteStreams.toByteArray(in); + } catch (Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + subject = createProtoDescriptorsSubject(ddlClient, protoDescriptors); + get(subject.executeDdlAsync(CallType.SYNC, ddl)); + verify(ddlClient).executeDdl(sql, protoDescriptors); } @Test @@ -616,6 +667,7 @@ public void testExecuteQueryWithOptionsTest() { .setAutocommitDmlMode(AutocommitDmlMode.TRANSACTIONAL) .withStatementExecutor(executor) .setReadOnlyStaleness(TimestampBound.strong()) + .setSpan(Span.getInvalid()) .build(); assertThat( get( @@ -732,7 +784,7 @@ public void testMultiUse() { DdlClient ddlClient = createDefaultMockDdlClient(); SingleUseTransaction subject = createDdlSubject(ddlClient); get(subject.executeDdlAsync(CallType.SYNC, ddl)); - verify(ddlClient).executeDdl(sql); + verify(ddlClient).executeDdl(sql, null); try { get(subject.executeDdlAsync(CallType.SYNC, ddl)); fail("missing expected exception"); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SpannerPoolTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SpannerPoolTest.java index b11c1f19be6..19d49139635 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SpannerPoolTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/SpannerPoolTest.java @@ -515,6 +515,54 @@ public void testSpannerPoolKeyEquality() { assertNotEquals(key4, key5); } + @Test + public void testEnableApiTracing() { + SpannerPoolKey keyWithoutApiTracingConfig = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d") + .setCredentials(NoCredentials.getInstance()) + .build()); + SpannerPoolKey keyWithApiTracingEnabled = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d?enableApiTracing=true") + 
.setCredentials(NoCredentials.getInstance()) + .build()); + SpannerPoolKey keyWithApiTracingDisabled = + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d?enableApiTracing=false") + .setCredentials(NoCredentials.getInstance()) + .build()); + + assertNotEquals(keyWithoutApiTracingConfig, keyWithApiTracingEnabled); + assertNotEquals(keyWithoutApiTracingConfig, keyWithApiTracingDisabled); + assertNotEquals(keyWithApiTracingEnabled, keyWithApiTracingDisabled); + + assertEquals( + keyWithApiTracingEnabled, + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d?enableApiTracing=true") + .setCredentials(NoCredentials.getInstance()) + .build())); + assertEquals( + keyWithApiTracingDisabled, + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d?enableApiTracing=false") + .setCredentials(NoCredentials.getInstance()) + .build())); + assertEquals( + keyWithoutApiTracingConfig, + SpannerPoolKey.of( + ConnectionOptions.newBuilder() + .setUri("cloudspanner:/projects/p/instances/i/databases/d") + .setCredentials(NoCredentials.getInstance()) + .build())); + } + @Test public void testOpenTelemetry() { SpannerPool pool = createSubjectAndMocks(); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementResultImplTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementResultImplTest.java index c28d3b75b95..0a55ff2c420 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementResultImplTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/StatementResultImplTest.java @@ -21,9 +21,13 @@ import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; +import static 
org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; +import com.google.cloud.ByteArray; import com.google.cloud.Timestamp; import com.google.cloud.spanner.ErrorCode; import com.google.cloud.spanner.ResultSet; @@ -154,6 +158,18 @@ public void testStringResultSetGetResultSet() { assertThat(subject.getResultSet().next(), is(true)); assertThat(subject.getResultSet().getString("foo"), is(equalTo("bar"))); assertThat(subject.getResultSet().next(), is(false)); + + subject = + StatementResultImpl.resultSet( + "path", "descriptors.pb", ClientSideStatementType.SHOW_PROTO_DESCRIPTORS_FILE_PATH); + assertThat(subject.getResultType(), is(equalTo(ResultType.RESULT_SET))); + assertThat( + subject.getClientSideStatementType(), + is(equalTo(ClientSideStatementType.SHOW_PROTO_DESCRIPTORS_FILE_PATH))); + assertThat(subject.getResultSet(), is(notNullValue())); + assertThat(subject.getResultSet().next(), is(true)); + assertThat(subject.getResultSet().getString("path"), is(equalTo("descriptors.pb"))); + assertThat(subject.getResultSet().next(), is(false)); } @Test @@ -190,4 +206,19 @@ public void testTimestampResultSetGetResultSet() { is(equalTo(Timestamp.ofTimeSecondsAndNanos(10L, 10)))); assertThat(subject.getResultSet().next(), is(false)); } + + @Test + public void testBytesResultSetGetResultSet() { + StatementResult subject = + StatementResultImpl.resultSet( + "foo", "protoDescriptors".getBytes(), ClientSideStatementType.SHOW_PROTO_DESCRIPTORS); + assertEquals(subject.getResultType(), ResultType.RESULT_SET); + assertEquals( + subject.getClientSideStatementType(), ClientSideStatementType.SHOW_PROTO_DESCRIPTORS); + assertNotNull(subject.getResultSet()); + assertTrue(subject.getResultSet().next()); + assertEquals( + subject.getResultSet().getBytes("foo"), ByteArray.copyFrom("protoDescriptors".getBytes())); + 
assertFalse(subject.getResultSet().next()); + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITBulkConnectionTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITBulkConnectionTest.java index 9cc46e6ee13..42358a647a1 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITBulkConnectionTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITBulkConnectionTest.java @@ -36,8 +36,8 @@ /** * Test opening multiple generic (not JDBC) Spanner connections. This test should not be run in - * parallel with other tests, as it tries to close all active connections, and should not try to - * close connections of other integration tests. + * parallel with other tests in the same JVM, as it tries to close all active connections, and + * should not try to close connections of other integration tests. */ @Category(SerialIntegrationTest.class) @RunWith(JUnit4.class) diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITDmlReturningTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITDmlReturningTest.java index 5e5ca800402..83f9beb7cf7 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITDmlReturningTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITDmlReturningTest.java @@ -22,12 +22,13 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeFalse; import com.google.cloud.spanner.AsyncResultSet; import com.google.cloud.spanner.AsyncResultSet.CallbackResponse; +import com.google.cloud.spanner.DatabaseClient; import com.google.cloud.spanner.Dialect; import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.KeySet; import com.google.cloud.spanner.Mutation; 
import com.google.cloud.spanner.ParallelIntegrationTest; import com.google.cloud.spanner.ResultSet; @@ -39,15 +40,15 @@ import com.google.cloud.spanner.connection.StatementResult; import com.google.cloud.spanner.connection.StatementResult.ResultType; import com.google.cloud.spanner.connection.TransactionMode; -import com.google.cloud.spanner.testing.EmulatorSpannerHelper; +import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; +import java.util.HashSet; import java.util.List; -import java.util.Map; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import org.junit.Before; @@ -81,12 +82,7 @@ public class ITDmlReturningTest extends ITAbstractSpannerTest { + " SingerId BIGINT PRIMARY KEY," + " FirstName character varying(1024)," + " LastName character varying(1024))"); - private final Map IS_INITIALIZED = new HashMap<>(); - - public ITDmlReturningTest() { - IS_INITIALIZED.put(Dialect.GOOGLE_STANDARD_SQL, false); - IS_INITIALIZED.put(Dialect.POSTGRESQL, false); - } + private static final Set IS_INITIALIZED = new HashSet<>(); @Parameter public Dialect dialect; @@ -96,41 +92,34 @@ public static Object[] data() { } private boolean checkAndSetInitialized() { - if ((dialect == Dialect.GOOGLE_STANDARD_SQL) && !IS_INITIALIZED.get(dialect)) { - IS_INITIALIZED.put(dialect, true); - return true; - } - if ((dialect == Dialect.POSTGRESQL) && !IS_INITIALIZED.get(dialect)) { - IS_INITIALIZED.put(dialect, true); - return true; - } - return false; + return !IS_INITIALIZED.add(dialect); } @Before public void setupTable() { - assumeFalse( - "DML Returning is not supported in the emulator", EmulatorSpannerHelper.isUsingEmulator()); - if (checkAndSetInitialized()) { + if (!checkAndSetInitialized()) { database = env.getTestHelper() 
.createTestDatabase(dialect, Collections.singleton(DDL_MAP.get(dialect))); - List firstNames = Arrays.asList("ABC", "ABC", "DEF", "PQR", "ABC"); - List lastNames = Arrays.asList("XYZ", "DEF", "XYZ", "ABC", "GHI"); - List mutations = new ArrayList<>(); - for (int id = 1; id <= 5; id++) { - mutations.add( - Mutation.newInsertBuilder("SINGERS") - .set("SINGERID") - .to(id) - .set("FIRSTNAME") - .to(firstNames.get(id - 1)) - .set("LASTNAME") - .to(lastNames.get(id - 1)) - .build()); - } - env.getTestHelper().getDatabaseClient(database).write(mutations); } + DatabaseClient client = env.getTestHelper().getDatabaseClient(database); + client.write(ImmutableList.of(Mutation.delete("SINGERS", KeySet.all()))); + + List firstNames = Arrays.asList("ABC", "ABC", "DEF", "PQR", "ABC"); + List lastNames = Arrays.asList("XYZ", "DEF", "XYZ", "ABC", "GHI"); + List mutations = new ArrayList<>(); + for (int id = 1; id <= 5; id++) { + mutations.add( + Mutation.newInsertBuilder("SINGERS") + .set("SINGERID") + .to(id) + .set("FIRSTNAME") + .to(firstNames.get(id - 1)) + .set("LASTNAME") + .to(lastNames.get(id - 1)) + .build()); + } + env.getTestHelper().getDatabaseClient(database).write(mutations); } @Test @@ -211,9 +200,9 @@ public void testDmlReturningExecuteUpdateAsync() { public void testDmlReturningExecuteBatchUpdate() { try (Connection connection = createConnection()) { connection.setAutocommit(false); - final Statement UPDATE_STMT = UPDATE_RETURNING_MAP.get(dialect); + final Statement updateStmt = Preconditions.checkNotNull(UPDATE_RETURNING_MAP.get(dialect)); long[] counts = - connection.executeBatchUpdate(ImmutableList.of(UPDATE_STMT, UPDATE_STMT, UPDATE_STMT)); + connection.executeBatchUpdate(ImmutableList.of(updateStmt, updateStmt, updateStmt)); assertArrayEquals(counts, new long[] {3, 3, 3}); } } @@ -222,10 +211,10 @@ public void testDmlReturningExecuteBatchUpdate() { public void testDmlReturningExecuteBatchUpdateAsync() { try (Connection connection = createConnection()) { 
connection.setAutocommit(false); - final Statement UPDATE_STMT = UPDATE_RETURNING_MAP.get(dialect); + final Statement updateStmt = Preconditions.checkNotNull(UPDATE_RETURNING_MAP.get(dialect)); long[] counts = connection - .executeBatchUpdateAsync(ImmutableList.of(UPDATE_STMT, UPDATE_STMT, UPDATE_STMT)) + .executeBatchUpdateAsync(ImmutableList.of(updateStmt, updateStmt, updateStmt)) .get(); assertArrayEquals(counts, new long[] {3, 3, 3}); } catch (ExecutionException | InterruptedException e) { diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITEmulatorConcurrentTransactionsTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITEmulatorConcurrentTransactionsTest.java index 95d188f4f0a..fe651554dd5 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITEmulatorConcurrentTransactionsTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITEmulatorConcurrentTransactionsTest.java @@ -19,6 +19,7 @@ import static com.google.cloud.spanner.testing.EmulatorSpannerHelper.isUsingEmulator; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assume.assumeTrue; @@ -26,6 +27,7 @@ import com.google.cloud.spanner.Mutation; import com.google.cloud.spanner.ParallelIntegrationTest; import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SpannerExceptionFactory; import com.google.cloud.spanner.Statement; import com.google.cloud.spanner.connection.Connection; import com.google.cloud.spanner.connection.ITAbstractSpannerTest; @@ -33,6 +35,7 @@ import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.atomic.AtomicInteger; @@ -41,14 +44,24 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; @Category(ParallelIntegrationTest.class) -@RunWith(JUnit4.class) +@RunWith(Parameterized.class) public class ITEmulatorConcurrentTransactionsTest extends ITAbstractSpannerTest { + @Parameters(name = "Use auto-savepoints={0}") + public static Object[] parameters() { + return new Object[] {Boolean.TRUE, Boolean.FALSE}; + } + + @Parameter public boolean useAutoSavepointsForEmulator; + @Override public void appendConnectionUri(StringBuilder uri) { - uri.append(";autoConfigEmulator=true;autoCommit=false"); + uri.append(";autoConfigEmulator=true;autoCommit=false;useAutoSavepointsForEmulator=") + .append(useAutoSavepointsForEmulator); } @Override @@ -118,15 +131,21 @@ public void testSingleThreadRandomTransactions() { } @Test - public void testMultiThreadedRandomTransactions() throws InterruptedException { + public void testMultiThreadedRandomTransactions() throws Exception { int numThreads = ThreadLocalRandom.current().nextInt(10) + 5; ExecutorService executor = Executors.newFixedThreadPool(numThreads); AtomicInteger numRowsInserted = new AtomicInteger(); + List> futures = new ArrayList<>(numThreads); for (int thread = 0; thread < numThreads; thread++) { - executor.submit(() -> runRandomTransactions(numRowsInserted)); + futures.add(executor.submit(() -> runRandomTransactions(numRowsInserted))); } executor.shutdown(); - assertTrue(executor.awaitTermination(30L, TimeUnit.SECONDS)); + assertTrue(executor.awaitTermination(60L, TimeUnit.SECONDS)); + // Get the results of each transaction so the test case fails with a logical error message if + // any of the transactions failed. 
+ for (Future future : futures) { + assertNull(future.get()); + } verifyRowCount(numRowsInserted.get()); } @@ -141,7 +160,7 @@ private void runRandomTransactions(AtomicInteger numRowsInserted) { while (!connections.isEmpty()) { int index = ThreadLocalRandom.current().nextInt(connections.size()); Connection connection = connections.get(index); - if (ThreadLocalRandom.current().nextInt(10) < 3) { + if (ThreadLocalRandom.current().nextInt(10) < 5) { connection.commit(); connection.close(); assertEquals(connection, connections.remove(index)); @@ -155,6 +174,12 @@ private void runRandomTransactions(AtomicInteger numRowsInserted) { .build())); numRowsInserted.incrementAndGet(); } + try { + // Make sure to have a small wait between statements. + Thread.sleep(ThreadLocalRandom.current().nextInt(1, 5)); + } catch (InterruptedException interruptedException) { + throw SpannerExceptionFactory.propagateInterrupt(interruptedException); + } } } finally { for (Connection connection : connections) { diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITQueryOptionsTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITQueryOptionsTest.java index 09f4ada43a8..62eba4f4315 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITQueryOptionsTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/connection/it/ITQueryOptionsTest.java @@ -16,12 +16,18 @@ package com.google.cloud.spanner.connection.it; +import com.google.cloud.spanner.ParallelIntegrationTest; import com.google.cloud.spanner.connection.ITAbstractSpannerTest; import com.google.cloud.spanner.connection.SqlScriptVerifier; import com.google.cloud.spanner.connection.SqlScriptVerifier.SpannerGenericConnection; import org.junit.Before; import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; 
+@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) public class ITQueryOptionsTest extends ITAbstractSpannerTest { private static final String TEST_QUERY_OPTIONS = "ITSqlScriptTest_TestQueryOptions.sql"; diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAsyncAPITest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAsyncAPITest.java index ebd3ab4883f..1ca69fe975d 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAsyncAPITest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAsyncAPITest.java @@ -42,7 +42,7 @@ import com.google.cloud.spanner.KeySet; import com.google.cloud.spanner.Mutation; import com.google.cloud.spanner.Options; -import com.google.cloud.spanner.SerialIntegrationTest; +import com.google.cloud.spanner.ParallelIntegrationTest; import com.google.cloud.spanner.SpannerException; import com.google.cloud.spanner.Statement; import com.google.cloud.spanner.Struct; @@ -58,6 +58,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.ThreadLocalRandom; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -68,7 +69,7 @@ import org.junit.runners.JUnit4; /** Integration tests for asynchronous APIs. 
*/ -@Category(SerialIntegrationTest.class) +@Category(ParallelIntegrationTest.class) @RunWith(JUnit4.class) public class ITAsyncAPITest { @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); @@ -237,17 +238,24 @@ public void invalidDatabase() throws Exception { RemoteSpannerHelper helper = env.getTestHelper(); DatabaseClient invalidClient = helper.getClient().getDatabaseClient(DatabaseId.of(helper.getInstanceId(), "invalid")); - ApiFuture row = - invalidClient - .singleUse(TimestampBound.strong()) - .readRowAsync(TABLE_NAME, Key.of("k99"), ALL_COLUMNS); + Thread.sleep(ThreadLocalRandom.current().nextLong(100L)); try { + // The NOT_FOUND error can come from both the call to invalidClient.singleUse() as well as + // from the call to row.get(), which is why both need to be inside the try block. + ApiFuture row = + invalidClient + .singleUse(TimestampBound.strong()) + .readRowAsync(TABLE_NAME, Key.of("k99"), ALL_COLUMNS); row.get(); fail("missing expected exception"); - } catch (ExecutionException e) { - assertThat(e.getCause()).isInstanceOf(SpannerException.class); - SpannerException se = (SpannerException) e.getCause(); - assertThat(se.getErrorCode()).isEqualTo(ErrorCode.NOT_FOUND); + } catch (ExecutionException | SpannerException thrownException) { + SpannerException spannerException; + if (thrownException instanceof ExecutionException) { + spannerException = (SpannerException) thrownException.getCause(); + } else { + spannerException = (SpannerException) thrownException; + } + assertEquals(ErrorCode.NOT_FOUND, spannerException.getErrorCode()); } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAsyncExamplesTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAsyncExamplesTest.java index 87f26da9049..dc5abd77afd 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAsyncExamplesTest.java +++ 
b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAsyncExamplesTest.java @@ -33,8 +33,8 @@ import com.google.cloud.spanner.Key; import com.google.cloud.spanner.KeySet; import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ParallelIntegrationTest; import com.google.cloud.spanner.ReadOnlyTransaction; -import com.google.cloud.spanner.SerialIntegrationTest; import com.google.cloud.spanner.SpannerException; import com.google.cloud.spanner.Statement; import com.google.cloud.spanner.Struct; @@ -59,7 +59,7 @@ import org.junit.runners.JUnit4; /** Integration tests for asynchronous APIs. */ -@Category(SerialIntegrationTest.class) +@Category(ParallelIntegrationTest.class) @RunWith(JUnit4.class) public class ITAsyncExamplesTest { @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAutogeneratedAdminClientTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAutogeneratedAdminClientTest.java index a1c6a35819f..de5597da4f9 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAutogeneratedAdminClientTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITAutogeneratedAdminClientTest.java @@ -266,9 +266,9 @@ private Database createAndUpdateDatabase( private String getCreateTableStatement() { if (dialect == DatabaseDialect.POSTGRESQL) { - return "CREATE TABLE T (" + " \"K\" VARCHAR PRIMARY KEY" + ")"; + return "CREATE TABLE IF NOT EXISTS T (" + " \"K\" VARCHAR PRIMARY KEY" + ")"; } else { - return "CREATE TABLE T (" + " K STRING(MAX)" + ") PRIMARY KEY (K)"; + return "CREATE TABLE IF NOT EXISTS T (" + " K STRING(MAX)" + ") PRIMARY KEY (K)"; } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITBuiltInMetricsTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITBuiltInMetricsTest.java new file mode 100644 index 
00000000000..9ff7e06e813 --- /dev/null +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITBuiltInMetricsTest.java @@ -0,0 +1,111 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.it; + +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.cloud.monitoring.v3.MetricServiceClient; +import com.google.cloud.spanner.Database; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.IntegrationTestEnv; +import com.google.cloud.spanner.ParallelIntegrationTest; +import com.google.cloud.spanner.Statement; +import com.google.common.base.Stopwatch; +import com.google.monitoring.v3.ListTimeSeriesRequest; +import com.google.monitoring.v3.ListTimeSeriesResponse; +import com.google.monitoring.v3.ProjectName; +import com.google.monitoring.v3.TimeInterval; +import com.google.protobuf.util.Timestamps; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.threeten.bp.Duration; +import org.threeten.bp.Instant; + +@Category(ParallelIntegrationTest.class) +@RunWith(JUnit4.class) +@Ignore("Built-in Metrics are not GA'ed yet. 
Enable this test once the metrics are released") +public class ITBuiltInMetricsTest { + + private static Database db; + @ClassRule public static IntegrationTestEnv env = new IntegrationTestEnv(); + + private static DatabaseClient client; + + private static MetricServiceClient metricClient; + + @BeforeClass + public static void setUp() throws IOException { + metricClient = MetricServiceClient.create(); + // Enable BuiltinMetrics when the metrics are GA'ed + db = env.getTestHelper().createTestDatabase(); + client = env.getTestHelper().getDatabaseClient(db); + } + + @Test + public void testBuiltinMetricsWithDefaultOTEL() throws Exception { + // This stopwatch is used for to limit fetching of metric data in verifyMetrics + Stopwatch metricsPollingStopwatch = Stopwatch.createStarted(); + Instant start = Instant.now().minus(Duration.ofMinutes(2)); + Instant end = Instant.now().plus(Duration.ofMinutes(3)); + ProjectName name = ProjectName.of(env.getTestHelper().getOptions().getProjectId()); + + TimeInterval interval = + TimeInterval.newBuilder() + .setStartTime(Timestamps.fromMillis(start.toEpochMilli())) + .setEndTime(Timestamps.fromMillis(end.toEpochMilli())) + .build(); + + client + .readWriteTransaction() + .run(transaction -> transaction.executeQuery(Statement.of("Select 1"))); + + String metricFilter = + String.format( + "metric.type=\"spanner.googleapis.com/client/%s\" " + + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Spanner.ExecuteStreamingSql\"" + + " AND metric.labels.database=\"%s\"", + "operation_latencies", env.getTestHelper().getInstanceId(), db.getId()); + + ListTimeSeriesRequest.Builder requestBuilder = + ListTimeSeriesRequest.newBuilder() + .setName(name.toString()) + .setFilter(metricFilter) + .setInterval(interval) + .setView(ListTimeSeriesRequest.TimeSeriesView.FULL); + + ListTimeSeriesRequest request = requestBuilder.build(); + + ListTimeSeriesResponse response = metricClient.listTimeSeriesCallable().call(request); + while 
(response.getTimeSeriesCount() == 0 + && metricsPollingStopwatch.elapsed(TimeUnit.MINUTES) < 3) { + // Call listTimeSeries every minute + Thread.sleep(Duration.ofMinutes(1).toMillis()); + response = metricClient.listTimeSeriesCallable().call(request); + } + + assertWithMessage("View operation_latencies didn't return any data.") + .that(response.getTimeSeriesCount()) + .isGreaterThan(0); + } +} diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITProtoColumnTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITProtoColumnTest.java index 57d4d465aeb..7e525ebaa15 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITProtoColumnTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/it/ITProtoColumnTest.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThrows; import static org.junit.Assume.assumeFalse; -import static org.junit.Assume.assumeTrue; import com.google.cloud.ByteArray; import com.google.cloud.spanner.Database; @@ -70,17 +69,10 @@ public class ITProtoColumnTest { private static DatabaseAdminClient dbAdminClient; private static DatabaseClient databaseClient; - public static boolean isUsingAllowlistedProject() { - String projectId = System.getProperty("spanner.gce.config.project_id", ""); - return projectId.equalsIgnoreCase("gcloud-devel") - || projectId.equalsIgnoreCase("span-cloud-testing"); - } - @BeforeClass public static void setUpDatabase() throws Exception { assumeFalse( "Proto Column is not supported in the emulator", EmulatorSpannerHelper.isUsingEmulator()); - assumeTrue("Proto Column is not yet enabled in production", isUsingAllowlistedProject()); RemoteSpannerHelper testHelper = env.getTestHelper(); databaseID = DatabaseId.of(testHelper.getInstanceId(), testHelper.getUniqueDatabaseId()); dbAdminClient = testHelper.getClient().getDatabaseAdminClient(); @@ -101,15 +93,15 @@ public static void 
createDatabase() throws Exception { databaseToCreate, Arrays.asList( "CREATE PROTO BUNDLE (" - + "spanner.examples.music.SingerInfo," - + "spanner.examples.music.Genre," + + "examples.spanner.music.SingerInfo," + + "examples.spanner.music.Genre," + ")", "CREATE TABLE Singers (" + " SingerId INT64 NOT NULL," + " FirstName STRING(1024)," + " LastName STRING(1024)," - + " SingerInfo spanner.examples.music.SingerInfo," - + " SingerGenre spanner.examples.music.Genre," + + " SingerInfo examples.spanner.music.SingerInfo," + + " SingerGenre examples.spanner.music.Genre," + " SingerNationality STRING(1024) AS (SingerInfo.nationality) STORED," + " ) PRIMARY KEY (SingerNationality, SingerGenre)", "CREATE TABLE Types (" @@ -118,10 +110,10 @@ public static void createDatabase() throws Exception { + " Bytes BYTES(MAX)," + " Int64Array ARRAY," + " BytesArray ARRAY," - + " ProtoMessage spanner.examples.music.SingerInfo," - + " ProtoEnum spanner.examples.music.Genre," - + " ProtoMessageArray ARRAY," - + " ProtoEnumArray ARRAY," + + " ProtoMessage examples.spanner.music.SingerInfo," + + " ProtoEnum examples.spanner.music.Genre," + + " ProtoMessageArray ARRAY," + + " ProtoEnumArray ARRAY," + " ) PRIMARY KEY (RowID)", "CREATE INDEX SingerByNationalityAndGenre ON Singers(SingerNationality, SingerGenre)" + " STORING (SingerId, FirstName, LastName)")) @@ -139,7 +131,7 @@ public static void createDatabase() throws Exception { @AfterClass public static void afterClass() throws Exception { try { - if (!isUsingEmulator() && isUsingAllowlistedProject()) { + if (!isUsingEmulator()) { dbAdminClient.dropDatabase( databaseID.getInstanceId().getInstance(), databaseID.getDatabase()); } @@ -169,7 +161,6 @@ public void after() throws Exception { public void testProtoColumnsUpdateAndRead() { assumeFalse( "Proto Column is not supported in the emulator", EmulatorSpannerHelper.isUsingEmulator()); - assumeTrue("Proto Column is not yet enabled in production", isUsingAllowlistedProject()); SingerInfo 
singerInfo = SingerInfo.newBuilder().setSingerId(1).setNationality("Country1").build(); ByteArray singerInfoBytes = ByteArray.copyFrom(singerInfo.toByteArray()); @@ -277,7 +268,6 @@ public void testProtoColumnsUpdateAndRead() { public void testProtoColumnsDMLParameterizedQueriesPKAndIndexes() { assumeFalse( "Proto Column is not supported in the emulator", EmulatorSpannerHelper.isUsingEmulator()); - assumeTrue("Proto Column is not yet enabled in production", isUsingAllowlistedProject()); SingerInfo singerInfo1 = SingerInfo.newBuilder().setSingerId(1).setNationality("Country1").build(); @@ -384,7 +374,6 @@ public void testProtoColumnsDMLParameterizedQueriesPKAndIndexes() { public void testProtoMessageDeserializationError() { assumeFalse( "Proto Column is not supported in the emulator", EmulatorSpannerHelper.isUsingEmulator()); - assumeTrue("Proto Column is not yet enabled in production", isUsingAllowlistedProject()); SingerInfo singerInfo = SingerInfo.newBuilder().setSingerId(1).setNationality("Country1").build(); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpcTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpcTest.java index 42a07ed9ea6..b3ff3b8f1c2 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpcTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GapicSpannerRpcTest.java @@ -47,6 +47,7 @@ import com.google.cloud.spanner.SpannerExceptionFactory; import com.google.cloud.spanner.SpannerOptions; import com.google.cloud.spanner.SpannerOptions.CallContextConfigurator; +import com.google.cloud.spanner.SpannerOptionsHelper; import com.google.cloud.spanner.Statement; import com.google.cloud.spanner.TransactionRunner; import com.google.cloud.spanner.spi.v1.GapicSpannerRpc.AdminRequestsLimitExceededRetryAlgorithm; @@ -76,6 +77,12 @@ import io.grpc.auth.MoreCallCredentials; import 
io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; import io.grpc.protobuf.lite.ProtoLiteUtils; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; +import io.opentelemetry.context.propagation.ContextPropagators; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.samplers.Sampler; import java.io.IOException; import java.net.InetSocketAddress; import java.util.HashMap; @@ -148,6 +155,8 @@ public class GapicSpannerRpcTest { private static String defaultUserAgent; private static Spanner spanner; private static boolean isRouteToLeader; + private static boolean isEndToEndTracing; + private static boolean isTraceContextPresent; @Parameter public Dialect dialect; @@ -158,6 +167,10 @@ public static Object[] data() { @Before public void startServer() throws IOException { + // Enable OpenTelemetry tracing. + SpannerOptionsHelper.resetActiveTracingFramework(); + SpannerOptions.enableOpenTelemetryTraces(); + assumeTrue( "Skip tests when emulator is enabled as this test interferes with the check whether the emulator is running", System.getenv("SPANNER_EMULATOR_HOST") == null); @@ -194,13 +207,23 @@ public ServerCall.Listener interceptCall( if (call.getMethodDescriptor() .equals(SpannerGrpc.getExecuteStreamingSqlMethod()) || call.getMethodDescriptor().equals(SpannerGrpc.getExecuteSqlMethod())) { + String traceParentHeader = + headers.get(Key.of("traceparent", Metadata.ASCII_STRING_MARSHALLER)); + isTraceContextPresent = (traceParentHeader != null); String routeToLeaderHeader = headers.get( Key.of( "x-goog-spanner-route-to-leader", Metadata.ASCII_STRING_MARSHALLER)); + String endToEndTracingHeader = + headers.get( + Key.of( + "x-goog-spanner-end-to-end-tracing", + Metadata.ASCII_STRING_MARSHALLER)); isRouteToLeader = (routeToLeaderHeader != null && routeToLeaderHeader.equals("true")); + isEndToEndTracing = + 
(endToEndTracingHeader != null && endToEndTracingHeader.equals("true")); } return Contexts.interceptCall(Context.current(), call, headers, next); } @@ -224,6 +247,8 @@ public void reset() throws InterruptedException { server.awaitTermination(); } isRouteToLeader = false; + isEndToEndTracing = false; + isTraceContextPresent = false; } @Test @@ -464,6 +489,83 @@ public void testNewCallContextWithRouteToLeaderHeaderAndLarDisabled() { rpc.shutdown(); } + @Test + public void testNewCallContextWithEndToEndTracingHeader() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("some-project") + .setEnableEndToEndTracing(true) + .build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, false); + GrpcCallContext callContext = + rpc.newCallContext( + optionsMap, + "/some/resource", + ExecuteSqlRequest.getDefaultInstance(), + SpannerGrpc.getExecuteSqlMethod()); + assertNotNull(callContext); + assertEquals( + ImmutableList.of("true"), + callContext.getExtraHeaders().get("x-goog-spanner-end-to-end-tracing")); + assertEquals( + ImmutableList.of("projects/some-project"), + callContext.getExtraHeaders().get(ApiClientHeaderProvider.getDefaultResourceHeaderKey())); + rpc.shutdown(); + } + + @Test + public void testNewCallContextWithoutEndToEndTracingHeader() { + SpannerOptions options = + SpannerOptions.newBuilder() + .setProjectId("some-project") + .setEnableEndToEndTracing(false) + .build(); + GapicSpannerRpc rpc = new GapicSpannerRpc(options, false); + GrpcCallContext callContext = + rpc.newCallContext( + optionsMap, + "/some/resource", + ExecuteSqlRequest.getDefaultInstance(), + SpannerGrpc.getExecuteSqlMethod()); + assertNotNull(callContext); + assertNull(callContext.getExtraHeaders().get("x-goog-spanner-end-to-end-tracing")); + rpc.shutdown(); + } + + @Test + public void testEndToEndTracingHeaderWithEnabledTracing() { + final SpannerOptions options = + createSpannerOptions().toBuilder().setEnableEndToEndTracing(true).build(); + try (Spanner spanner = 
options.getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + TransactionRunner runner = databaseClient.readWriteTransaction(); + runner.run( + transaction -> { + transaction.executeUpdate(UPDATE_FOO_STATEMENT); + return null; + }); + } + assertTrue(isEndToEndTracing); + } + + @Test + public void testEndToEndTracingHeaderWithDisabledTracing() { + final SpannerOptions options = + createSpannerOptions().toBuilder().setEnableEndToEndTracing(false).build(); + try (Spanner spanner = options.getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + TransactionRunner runner = databaseClient.readWriteTransaction(); + runner.run( + transaction -> { + transaction.executeUpdate(UPDATE_FOO_STATEMENT); + return null; + }); + } + assertFalse(isEndToEndTracing); + } + @Test public void testAdminRequestsLimitExceededRetryAlgorithm() { AdminRequestsLimitExceededRetryAlgorithm alg = @@ -535,6 +637,73 @@ public void testCustomUserAgent() { } } + @Test + public void testTraceContextHeaderWithOpenTelemetryAndEndToEndTracingEnabled() { + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder() + .setPropagators(ContextPropagators.create(W3CTraceContextPropagator.getInstance())) + .setTracerProvider(SdkTracerProvider.builder().setSampler(Sampler.alwaysOn()).build()) + .build(); + + final SpannerOptions options = + createSpannerOptions() + .toBuilder() + .setOpenTelemetry(openTelemetry) + .setEnableEndToEndTracing(true) + .build(); + try (Spanner spanner = options.getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + + try (final ResultSet rs = databaseClient.singleUse().executeQuery(SELECT1AND2)) { + rs.next(); + } + + assertTrue(isTraceContextPresent); + } + } + + @Test + public void 
testTraceContextHeaderWithOpenTelemetryAndEndToEndTracingDisabled() { + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder() + .setPropagators(ContextPropagators.create(W3CTraceContextPropagator.getInstance())) + .setTracerProvider(SdkTracerProvider.builder().setSampler(Sampler.alwaysOn()).build()) + .build(); + + final SpannerOptions options = + createSpannerOptions() + .toBuilder() + .setOpenTelemetry(openTelemetry) + .setEnableEndToEndTracing(false) + .build(); + try (Spanner spanner = options.getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + + try (final ResultSet rs = databaseClient.singleUse().executeQuery(SELECT1AND2)) { + rs.next(); + } + + assertFalse(isTraceContextPresent); + } + } + + @Test + public void testTraceContextHeaderWithoutOpenTelemetry() { + final SpannerOptions options = createSpannerOptions(); + try (Spanner spanner = options.getService()) { + final DatabaseClient databaseClient = + spanner.getDatabaseClient(DatabaseId.of("[PROJECT]", "[INSTANCE]", "[DATABASE]")); + + try (final ResultSet rs = databaseClient.singleUse().executeQuery(SELECT1AND2)) { + rs.next(); + } + + assertFalse(isTraceContextPresent); + } + } + @Test public void testRouteToLeaderHeaderForReadOnly() { final SpannerOptions options = diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GfeLatencyTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GfeLatencyTest.java index 9bb09aace70..908a4ad5573 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GfeLatencyTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/GfeLatencyTest.java @@ -17,6 +17,7 @@ package com.google.cloud.spanner.spi.v1; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import com.google.auth.oauth2.AccessToken; import com.google.auth.oauth2.OAuth2Credentials; 
@@ -27,6 +28,8 @@ import com.google.cloud.spanner.Spanner; import com.google.cloud.spanner.SpannerOptions; import com.google.cloud.spanner.Statement; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; import com.google.protobuf.ListValue; import com.google.spanner.v1.ResultSetMetadata; import com.google.spanner.v1.StructType; @@ -47,7 +50,6 @@ import io.opencensus.tags.TagValue; import java.io.IOException; import java.net.InetSocketAddress; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; @@ -80,26 +82,22 @@ public class GfeLatencyTest { private static MockSpannerServiceImpl mockSpanner; private static Server server; - private static InetSocketAddress address; private static Spanner spanner; private static DatabaseClient databaseClient; - private static final Map optionsMap = new HashMap<>(); - private static MockSpannerServiceImpl mockSpannerNoHeader; private static Server serverNoHeader; - private static InetSocketAddress addressNoHeader; private static Spanner spannerNoHeader; private static DatabaseClient databaseClientNoHeader; - private static String instanceId = "fake-instance"; - private static String databaseId = "fake-database"; - private static String projectId = "fake-project"; + private static final String INSTANCE_ID = "fake-instance"; + private static final String DATABASE_ID = "fake-database"; + private static final String PROJECT_ID = "fake-project"; - private static final long WAIT_FOR_METRICS_TIME_MS = 1_000; - private static final int MAXIMUM_RETRIES = 5; + private static final int MAXIMUM_RETRIES = 50000; - private static AtomicInteger fakeServerTiming = new AtomicInteger(new Random().nextInt(1000) + 1); + private static final AtomicInteger FAKE_SERVER_TIMING = + new AtomicInteger(new Random().nextInt(1000) + 1); private static final Statement SELECT1AND2 = Statement.of("SELECT 1 AS COL1 UNION ALL SELECT 2 AS COL1"); @@ -135,6 +133,7 @@ public class 
GfeLatencyTest { @BeforeClass public static void startServer() throws IOException { + //noinspection deprecation SpannerRpcViews.registerGfeLatencyAndHeaderMissingCountViews(); mockSpanner = new MockSpannerServiceImpl(); @@ -143,7 +142,7 @@ public static void startServer() throws IOException { MockSpannerServiceImpl.StatementResult.query(SELECT1AND2, SELECT1_RESULTSET)); mockSpanner.putStatementResult( MockSpannerServiceImpl.StatementResult.update(UPDATE_FOO_STATEMENT, 1L)); - address = new InetSocketAddress("localhost", 0); + InetSocketAddress address = new InetSocketAddress("localhost", 0); server = NettyServerBuilder.forAddress(address) .addService(mockSpanner) @@ -161,7 +160,7 @@ public ServerCall.Listener interceptCall( public void sendHeaders(Metadata headers) { headers.put( Metadata.Key.of("server-timing", Metadata.ASCII_STRING_MARSHALLER), - String.format("gfet4t7; dur=%d", fakeServerTiming.get())); + String.format("gfet4t7; dur=%d", FAKE_SERVER_TIMING.get())); super.sendHeaders(headers); } }, @@ -170,9 +169,8 @@ public void sendHeaders(Metadata headers) { }) .build() .start(); - optionsMap.put(SpannerRpc.Option.CHANNEL_HINT, 1L); spanner = createSpannerOptions(address, server).getService(); - databaseClient = spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + databaseClient = spanner.getDatabaseClient(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DATABASE_ID)); mockSpannerNoHeader = new MockSpannerServiceImpl(); mockSpannerNoHeader.setAbortProbability(0.0D); @@ -180,7 +178,7 @@ public void sendHeaders(Metadata headers) { MockSpannerServiceImpl.StatementResult.query(SELECT1AND2, SELECT1_RESULTSET)); mockSpannerNoHeader.putStatementResult( MockSpannerServiceImpl.StatementResult.update(UPDATE_FOO_STATEMENT, 1L)); - addressNoHeader = new InetSocketAddress("localhost", 0); + InetSocketAddress addressNoHeader = new InetSocketAddress("localhost", 0); serverNoHeader = NettyServerBuilder.forAddress(addressNoHeader) 
.addService(mockSpannerNoHeader) @@ -188,7 +186,7 @@ public void sendHeaders(Metadata headers) { .start(); spannerNoHeader = createSpannerOptions(addressNoHeader, serverNoHeader).getService(); databaseClientNoHeader = - spannerNoHeader.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + spannerNoHeader.getDatabaseClient(DatabaseId.of(PROJECT_ID, INSTANCE_ID, DATABASE_ID)); } @AfterClass @@ -221,12 +219,9 @@ public void testGfeLatencyExecuteStreamingSql() throws InterruptedException { long latency = getMetric( SpannerRpcViews.SPANNER_GFE_LATENCY_VIEW, - projectId, - instanceId, - databaseId, "google.spanner.v1.Spanner/ExecuteStreamingSql", false); - assertEquals(fakeServerTiming.get(), latency); + assertEquals(FAKE_SERVER_TIMING.get(), latency); } @Test @@ -238,12 +233,9 @@ public void testGfeLatencyExecuteSql() throws InterruptedException { long latency = getMetric( SpannerRpcViews.SPANNER_GFE_LATENCY_VIEW, - projectId, - instanceId, - databaseId, "google.spanner.v1.Spanner/ExecuteSql", false); - assertEquals(fakeServerTiming.get(), latency); + assertEquals(FAKE_SERVER_TIMING.get(), latency); } @Test @@ -254,9 +246,6 @@ public void testGfeMissingHeaderCountExecuteStreamingSql() throws InterruptedExc long count = getMetric( SpannerRpcViews.SPANNER_GFE_HEADER_MISSING_COUNT_VIEW, - projectId, - instanceId, - databaseId, "google.spanner.v1.Spanner/ExecuteStreamingSql", false); assertEquals(0, count); @@ -267,9 +256,6 @@ public void testGfeMissingHeaderCountExecuteStreamingSql() throws InterruptedExc long count1 = getMetric( SpannerRpcViews.SPANNER_GFE_HEADER_MISSING_COUNT_VIEW, - projectId, - instanceId, - databaseId, "google.spanner.v1.Spanner/ExecuteStreamingSql", true); assertEquals(1, count1); @@ -283,9 +269,6 @@ public void testGfeMissingHeaderExecuteSql() throws InterruptedException { long count = getMetric( SpannerRpcViews.SPANNER_GFE_HEADER_MISSING_COUNT_VIEW, - projectId, - instanceId, - databaseId, "google.spanner.v1.Spanner/ExecuteSql", 
false); assertEquals(0, count); @@ -296,9 +279,6 @@ public void testGfeMissingHeaderExecuteSql() throws InterruptedException { long count1 = getMetric( SpannerRpcViews.SPANNER_GFE_HEADER_MISSING_COUNT_VIEW, - projectId, - instanceId, - databaseId, "google.spanner.v1.Spanner/ExecuteSql", true); assertEquals(1, count1); @@ -321,78 +301,75 @@ private static SpannerOptions createSpannerOptions(InetSocketAddress address, Se } private long getAggregationValueAsLong(AggregationData aggregationData) { - return aggregationData.match( - new io.opencensus.common.Function() { - @Override - public Long apply(AggregationData.SumDataDouble arg) { - return (long) arg.getSum(); - } - }, - new io.opencensus.common.Function() { - @Override - public Long apply(AggregationData.SumDataLong arg) { - return arg.getSum(); - } - }, - new io.opencensus.common.Function() { - @Override - public Long apply(AggregationData.CountData arg) { - return arg.getCount(); - } - }, - new io.opencensus.common.Function() { - @Override - public Long apply(AggregationData.DistributionData arg) { - return (long) arg.getMean(); - } - }, - new io.opencensus.common.Function() { - @Override - public Long apply(AggregationData.LastValueDataDouble arg) { - return (long) arg.getLastValue(); - } - }, - new io.opencensus.common.Function() { - @Override - public Long apply(AggregationData.LastValueDataLong arg) { - return arg.getLastValue(); - } - }, - new io.opencensus.common.Function() { - @Override - public Long apply(AggregationData arg) { - throw new UnsupportedOperationException(); - } - }); + return MoreObjects.firstNonNull( + aggregationData.match( + new io.opencensus.common.Function() { + @Override + public Long apply(AggregationData.SumDataDouble arg) { + return (long) Preconditions.checkNotNull(arg).getSum(); + } + }, + new io.opencensus.common.Function() { + @Override + public Long apply(AggregationData.SumDataLong arg) { + return Preconditions.checkNotNull(arg).getSum(); + } + }, + new 
io.opencensus.common.Function() { + @Override + public Long apply(AggregationData.CountData arg) { + return Preconditions.checkNotNull(arg).getCount(); + } + }, + new io.opencensus.common.Function() { + @Override + public Long apply(AggregationData.DistributionData arg) { + return (long) Preconditions.checkNotNull(arg).getMean(); + } + }, + new io.opencensus.common.Function() { + @Override + public Long apply(AggregationData.LastValueDataDouble arg) { + return (long) Preconditions.checkNotNull(arg).getLastValue(); + } + }, + new io.opencensus.common.Function() { + @Override + public Long apply(AggregationData.LastValueDataLong arg) { + return Preconditions.checkNotNull(arg).getLastValue(); + } + }, + new io.opencensus.common.Function() { + @Override + public Long apply(AggregationData arg) { + throw new UnsupportedOperationException(); + } + }), + -1L); } - private long getMetric( - View view, - String projectId, - String instanceId, - String databaseId, - String method, - boolean withOverride) - throws InterruptedException { + private long getMetric(View view, String method, boolean withOverride) { List tagValues = new java.util.ArrayList<>(); for (TagKey column : view.getColumns()) { if (column == SpannerRpcViews.INSTANCE_ID) { - tagValues.add(TagValue.create(instanceId)); + tagValues.add(TagValue.create(INSTANCE_ID)); } else if (column == SpannerRpcViews.DATABASE_ID) { - tagValues.add(TagValue.create(databaseId)); + tagValues.add(TagValue.create(DATABASE_ID)); } else if (column == SpannerRpcViews.METHOD) { tagValues.add(TagValue.create(method)); } else if (column == SpannerRpcViews.PROJECT_ID) { - tagValues.add(TagValue.create(projectId)); + tagValues.add(TagValue.create(PROJECT_ID)); } } for (int i = 0; i < MAXIMUM_RETRIES; i++) { - Thread.sleep(WAIT_FOR_METRICS_TIME_MS); + Thread.yield(); ViewData viewData = SpannerRpcViews.viewManager.getView(view.getName()); + assertNotNull(viewData); if (viewData.getAggregationMap() != null) { Map, AggregationData> 
aggregationMap = viewData.getAggregationMap(); AggregationData aggregationData = aggregationMap.get(tagValues); - if (withOverride && getAggregationValueAsLong(aggregationData) == 0) { + if (aggregationData == null + || withOverride && getAggregationValueAsLong(aggregationData) == 0) { continue; } return getAggregationValueAsLong(aggregationData); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/SpannerMetadataProviderTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/SpannerMetadataProviderTest.java index cc43e2dc334..c4fdd6200af 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/SpannerMetadataProviderTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/spi/v1/SpannerMetadataProviderTest.java @@ -94,6 +94,17 @@ public void testNewRouteToLeaderHeader() { assertTrue(Maps.difference(extraHeaders, expectedHeaders).areEqual()); } + @Test + public void testNewEndToEndTracingHeader() { + SpannerMetadataProvider metadataProvider = + SpannerMetadataProvider.create(ImmutableMap.of(), "header1"); + Map> extraHeaders = metadataProvider.newEndToEndTracingHeader(); + Map> expectedHeaders = + ImmutableMap.>of( + "x-goog-spanner-end-to-end-tracing", ImmutableList.of("true")); + assertTrue(Maps.difference(extraHeaders, expectedHeaders).areEqual()); + } + private String getResourceHeaderValue( SpannerMetadataProvider headerProvider, String resourceTokenTemplate) { Metadata metadata = headerProvider.newMetadata(resourceTokenTemplate, "projects/p"); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientHttpJsonTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientHttpJsonTest.java index 3b5f106f13f..1683d533855 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientHttpJsonTest.java +++ 
b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientHttpJsonTest.java @@ -44,6 +44,7 @@ import com.google.spanner.v1.ExecuteSqlRequest; import com.google.spanner.v1.KeySet; import com.google.spanner.v1.ListSessionsResponse; +import com.google.spanner.v1.MultiplexedSessionPrecommitToken; import com.google.spanner.v1.Mutation; import com.google.spanner.v1.Partition; import com.google.spanner.v1.PartitionOptions; @@ -587,6 +588,7 @@ public void executeSqlTest() throws Exception { .setMetadata(ResultSetMetadata.newBuilder().build()) .addAllRows(new ArrayList()) .setStats(ResultSetStats.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) .build(); mockService.addResponse(expectedResponse); @@ -673,6 +675,7 @@ public void executeBatchDmlTest() throws Exception { ExecuteBatchDmlResponse.newBuilder() .addAllResultSets(new ArrayList()) .setStatus(Status.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) .build(); mockService.addResponse(expectedResponse); @@ -735,6 +738,7 @@ public void readTest() throws Exception { .setMetadata(ResultSetMetadata.newBuilder().build()) .addAllRows(new ArrayList()) .setStats(ResultSetStats.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) .build(); mockService.addResponse(expectedResponse); @@ -821,6 +825,7 @@ public void beginTransactionTest() throws Exception { Transaction.newBuilder() .setId(ByteString.EMPTY) .setReadTimestamp(Timestamp.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) .build(); mockService.addResponse(expectedResponse); @@ -868,6 +873,7 @@ public void beginTransactionTest2() throws Exception { Transaction.newBuilder() .setId(ByteString.EMPTY) .setReadTimestamp(Timestamp.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) .build(); 
mockService.addResponse(expectedResponse); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientTest.java index 24b774425f6..83caf7405d9 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientTest.java @@ -55,6 +55,7 @@ import com.google.spanner.v1.KeySet; import com.google.spanner.v1.ListSessionsRequest; import com.google.spanner.v1.ListSessionsResponse; +import com.google.spanner.v1.MultiplexedSessionPrecommitToken; import com.google.spanner.v1.Mutation; import com.google.spanner.v1.PartialResultSet; import com.google.spanner.v1.Partition; @@ -545,6 +546,7 @@ public void executeSqlTest() throws Exception { .setMetadata(ResultSetMetadata.newBuilder().build()) .addAllRows(new ArrayList()) .setStats(ResultSetStats.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) .build(); mockSpanner.addResponse(expectedResponse); @@ -629,6 +631,7 @@ public void executeStreamingSqlTest() throws Exception { .setChunkedValue(true) .setResumeToken(ByteString.EMPTY) .setStats(ResultSetStats.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) .build(); mockSpanner.addResponse(expectedResponse); ExecuteSqlRequest request = @@ -702,6 +705,7 @@ public void executeBatchDmlTest() throws Exception { ExecuteBatchDmlResponse.newBuilder() .addAllResultSets(new ArrayList()) .setStatus(Status.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) .build(); mockSpanner.addResponse(expectedResponse); @@ -762,6 +766,7 @@ public void readTest() throws Exception { .setMetadata(ResultSetMetadata.newBuilder().build()) .addAllRows(new ArrayList()) .setStats(ResultSetStats.newBuilder().build()) + 
.setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) .build(); mockSpanner.addResponse(expectedResponse); @@ -801,6 +806,8 @@ public void readTest() throws Exception { Assert.assertEquals(request.getRequestOptions(), actualRequest.getRequestOptions()); Assert.assertEquals(request.getDirectedReadOptions(), actualRequest.getDirectedReadOptions()); Assert.assertEquals(request.getDataBoostEnabled(), actualRequest.getDataBoostEnabled()); + Assert.assertEquals(request.getOrderBy(), actualRequest.getOrderBy()); + Assert.assertEquals(request.getLockHint(), actualRequest.getLockHint()); Assert.assertTrue( channelProvider.isHeaderSent( ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), @@ -845,6 +852,7 @@ public void streamingReadTest() throws Exception { .setChunkedValue(true) .setResumeToken(ByteString.EMPTY) .setStats(ResultSetStats.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) .build(); mockSpanner.addResponse(expectedResponse); ReadRequest request = @@ -918,6 +926,7 @@ public void beginTransactionTest() throws Exception { Transaction.newBuilder() .setId(ByteString.EMPTY) .setReadTimestamp(Timestamp.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) .build(); mockSpanner.addResponse(expectedResponse); @@ -960,6 +969,7 @@ public void beginTransactionTest2() throws Exception { Transaction.newBuilder() .setId(ByteString.EMPTY) .setReadTimestamp(Timestamp.newBuilder().build()) + .setPrecommitToken(MultiplexedSessionPrecommitToken.newBuilder().build()) .build(); mockSpanner.addResponse(expectedResponse); diff --git a/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/README.md b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/README.md new file mode 100644 index 00000000000..967cb32a298 --- /dev/null +++ b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/README.md @@ -0,0 +1,5 @@ +#### To generate 
SingerProto.java and descriptors.pb file from singer.proto using `protoc` +```shell +cd google-cloud-spanner/src/test/resources/com/google/cloud/spanner +protoc --proto_path=. --include_imports --descriptor_set_out=descriptors.pb --java_out=. singer.proto +``` diff --git a/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ClientSideStatementsTest.sql b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ClientSideStatementsTest.sql index 30aed342903..9df55c43c00 100644 --- a/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ClientSideStatementsTest.sql +++ b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ClientSideStatementsTest.sql @@ -3591,6 +3591,205 @@ NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED show variable/-transaction_tag; NEW_CONNECTION; +show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +SHOW VARIABLE EXCLUDE_TXN_FROM_CHANGE_STREAMS; +NEW_CONNECTION; +show variable exclude_txn_from_change_streams; +NEW_CONNECTION; + show variable exclude_txn_from_change_streams; +NEW_CONNECTION; + show variable exclude_txn_from_change_streams; +NEW_CONNECTION; + + + +show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +show variable exclude_txn_from_change_streams ; +NEW_CONNECTION; +show variable exclude_txn_from_change_streams ; +NEW_CONNECTION; +show variable exclude_txn_from_change_streams + +; +NEW_CONNECTION; +show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +show +variable +exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable exclude_txn_from_change_streams%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +(show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams\; +NEW_CONNECTION; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable\exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable exclude_txn_from_change_streams/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-exclude_txn_from_change_streams; +NEW_CONNECTION; show variable rpc_priority; NEW_CONNECTION; SHOW VARIABLE RPC_PRIORITY; @@ -4188,151 +4387,350 @@ NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED show variable/-delay_transaction_start_until_first_write; NEW_CONNECTION; -partition select col1, col2 from my_table; +show variable keep_transaction_alive; NEW_CONNECTION; -PARTITION SELECT COL1, COL2 FROM MY_TABLE; +SHOW VARIABLE KEEP_TRANSACTION_ALIVE; NEW_CONNECTION; -partition select col1, col2 from my_table; +show variable keep_transaction_alive; NEW_CONNECTION; - partition select col1, col2 from my_table; + show variable keep_transaction_alive; NEW_CONNECTION; - partition select col1, col2 from my_table; + show variable keep_transaction_alive; 
NEW_CONNECTION; -partition select col1, col2 from my_table; +show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table ; +show variable keep_transaction_alive ; NEW_CONNECTION; -partition select col1, col2 from my_table ; +show variable keep_transaction_alive ; NEW_CONNECTION; -partition select col1, col2 from my_table +show variable keep_transaction_alive ; NEW_CONNECTION; -partition select col1, col2 from my_table; +show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table; +show variable keep_transaction_alive; NEW_CONNECTION; -partition -select -col1, -col2 -from -my_table; +show +variable +keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo partition select col1, col2 from my_table; +foo show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table bar; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%partition select col1, col2 from my_table; +%show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table%; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_partition select col1, col2 from my_table; +_show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table_; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&partition select col1, col2 from my_table; +&show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table&; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive&; +NEW_CONNECTION; +@EXPECT 
EXCEPTION UNIMPLEMENTED +show variable&keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$partition select col1, col2 from my_table; +$show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table$; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@partition select col1, col2 from my_table; +@show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table@; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!partition select col1, col2 from my_table; +!show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table!; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*partition select col1, col2 from my_table; +*show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table*; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(partition select col1, col2 from my_table; +(show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table(; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)partition select col1, col2 from my_table; +)show variable keep_transaction_alive; 
NEW_CONNECTION; -partition select col1, col2 from my_table); +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --partition select col1, col2 from my_table; +-show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table-; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+partition select col1, col2 from my_table; ++show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table+; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#partition select col1, col2 from my_table; +-#show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table-#; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/partition select col1, col2 from my_table; +/show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table/; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\partition select col1, col2 from my_table; +\show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table\; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable\keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?partition select col1, col2 from my_table; +?show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table?; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/partition select col1, col2 from my_table; +-/show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table-/; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#partition select col1, col2 from my_table; +/#show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table/#; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#keep_transaction_alive; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-partition select col1, col2 from my_table; +/-show variable keep_transaction_alive; NEW_CONNECTION; -partition select col1, col2 from my_table/-; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable keep_transaction_alive/-; NEW_CONNECTION; -run partitioned query select col1, col2 from my_table; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-keep_transaction_alive; NEW_CONNECTION; -RUN PARTITIONED QUERY SELECT COL1, COL2 FROM MY_TABLE; +partition select col1, col2 from my_table; NEW_CONNECTION; -run partitioned query select col1, col2 from my_table; +PARTITION SELECT COL1, COL2 FROM MY_TABLE; NEW_CONNECTION; - run partitioned query select col1, col2 from my_table; +partition select col1, col2 from my_table; NEW_CONNECTION; - run partitioned query select col1, col2 from my_table; + partition select col1, col2 from 
my_table; +NEW_CONNECTION; + partition select col1, col2 from my_table; NEW_CONNECTION; -run partitioned query select col1, col2 from my_table; +partition select col1, col2 from my_table; NEW_CONNECTION; -run partitioned query select col1, col2 from my_table ; +partition select col1, col2 from my_table ; +NEW_CONNECTION; +partition select col1, col2 from my_table ; +NEW_CONNECTION; +partition select col1, col2 from my_table + +; +NEW_CONNECTION; +partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table; +NEW_CONNECTION; +partition +select +col1, +col2 +from +my_table; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(partition select col1, col2 from 
my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-partition select col1, col2 from my_table; +NEW_CONNECTION; +partition select col1, col2 from my_table/-; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +RUN PARTITIONED QUERY SELECT COL1, COL2 FROM MY_TABLE; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; + run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; + run 
partitioned query select col1, col2 from my_table; +NEW_CONNECTION; + + + +run partitioned query select col1, col2 from my_table; +NEW_CONNECTION; +run partitioned query select col1, col2 from my_table ; NEW_CONNECTION; run partitioned query select col1, col2 from my_table ; NEW_CONNECTION; @@ -7230,285 +7628,483 @@ start batch ddl; @EXPECT EXCEPTION INVALID_ARGUMENT abort/-batch; NEW_CONNECTION; -set autocommit = true; +reset all; NEW_CONNECTION; -SET AUTOCOMMIT = TRUE; +RESET ALL; NEW_CONNECTION; -set autocommit = true; +reset all; NEW_CONNECTION; - set autocommit = true; + reset all; NEW_CONNECTION; - set autocommit = true; + reset all; NEW_CONNECTION; -set autocommit = true; +reset all; NEW_CONNECTION; -set autocommit = true ; +reset all ; NEW_CONNECTION; -set autocommit = true ; +reset all ; NEW_CONNECTION; -set autocommit = true +reset all ; NEW_CONNECTION; -set autocommit = true; +reset all; NEW_CONNECTION; -set autocommit = true; +reset all; NEW_CONNECTION; -set -autocommit -= -true; +reset +all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set autocommit = true; +foo reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true bar; +reset all bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set autocommit = true; +%reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true%; +reset all%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =%true; +reset%all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set autocommit = true; +_reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true_; +reset all_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =_true; +reset_all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set autocommit = true; +&reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true&; +reset all&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =&true; 
+reset&all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set autocommit = true; +$reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true$; +reset all$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =$true; +reset$all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set autocommit = true; +@reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true@; +reset all@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =@true; +reset@all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set autocommit = true; +!reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true!; +reset all!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =!true; +reset!all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set autocommit = true; +*reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true*; +reset all*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =*true; +reset*all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set autocommit = true; +(reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true(; +reset all(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =(true; +reset(all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set autocommit = true; +)reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true); +reset all); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =)true; +reset)all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set autocommit = true; +-reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true-; +reset all-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =-true; +reset-all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set autocommit = true; ++reset all; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set autocommit = true+; +reset all+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =+true; +reset+all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set autocommit = true; +-#reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true-#; +reset all-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =-#true; +reset-#all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set autocommit = true; +/reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true/; +reset all/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =/true; +reset/all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set autocommit = true; +\reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true\; +reset all\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =\true; +reset\all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set autocommit = true; +?reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true?; +reset all?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =?true; +reset?all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set autocommit = true; +-/reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true-/; +reset all-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =-/true; +reset-/all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set autocommit = true; +/#reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true/#; +reset all/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =/#true; +reset/#all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set autocommit = true; +/-reset all; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = true/-; +reset all/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit 
=/-true; +reset/-all; NEW_CONNECTION; -set autocommit = false; +set autocommit = true; NEW_CONNECTION; -SET AUTOCOMMIT = FALSE; +SET AUTOCOMMIT = TRUE; NEW_CONNECTION; -set autocommit = false; +set autocommit = true; NEW_CONNECTION; - set autocommit = false; + set autocommit = true; NEW_CONNECTION; - set autocommit = false; + set autocommit = true; NEW_CONNECTION; -set autocommit = false; +set autocommit = true; NEW_CONNECTION; -set autocommit = false ; +set autocommit = true ; NEW_CONNECTION; -set autocommit = false ; +set autocommit = true ; NEW_CONNECTION; -set autocommit = false +set autocommit = true ; NEW_CONNECTION; -set autocommit = false; +set autocommit = true; NEW_CONNECTION; -set autocommit = false; +set autocommit = true; NEW_CONNECTION; set autocommit = -false; +true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set autocommit = false; +foo set autocommit = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = false bar; +set autocommit = true bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set autocommit = false; +%set autocommit = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = false%; +set autocommit = true%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =%false; +set autocommit =%true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set autocommit = false; +_set autocommit = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = false_; +set autocommit = true_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =_false; +set autocommit =_true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set autocommit = false; +&set autocommit = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = false&; +set autocommit = true&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =&false; +set autocommit =&true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set autocommit = false; 
+$set autocommit = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = false$; +set autocommit = true$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit =$false; +set autocommit =$true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set autocommit = false; +@set autocommit = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set autocommit = false@; +set autocommit = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set autocommit = true; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set autocommit = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =/-true; +NEW_CONNECTION; +set autocommit = false; +NEW_CONNECTION; +SET AUTOCOMMIT = FALSE; +NEW_CONNECTION; +set autocommit = false; +NEW_CONNECTION; + set autocommit = false; +NEW_CONNECTION; + set autocommit = false; +NEW_CONNECTION; + + + +set autocommit = false; +NEW_CONNECTION; +set autocommit = false ; +NEW_CONNECTION; +set autocommit = false ; +NEW_CONNECTION; +set autocommit = false + +; +NEW_CONNECTION; +set autocommit = false; 
+NEW_CONNECTION; +set autocommit = false; +NEW_CONNECTION; +set +autocommit += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set autocommit = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set autocommit = false@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT set autocommit =@false; @@ -9296,13304 +9892,17100 @@ NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT set/-statement_timeout=null; NEW_CONNECTION; -set statement_timeout='1s'; +set statement_timeout = null ; NEW_CONNECTION; -SET STATEMENT_TIMEOUT='1S'; +SET STATEMENT_TIMEOUT = NULL ; NEW_CONNECTION; -set statement_timeout='1s'; +set statement_timeout = null ; NEW_CONNECTION; - set statement_timeout='1s'; + set statement_timeout = null ; NEW_CONNECTION; - set statement_timeout='1s'; + set statement_timeout = null ; NEW_CONNECTION; -set statement_timeout='1s'; +set statement_timeout = 
null ; NEW_CONNECTION; -set statement_timeout='1s' ; +set statement_timeout = null ; NEW_CONNECTION; -set statement_timeout='1s' ; +set statement_timeout = null ; NEW_CONNECTION; -set statement_timeout='1s' +set statement_timeout = null ; NEW_CONNECTION; -set statement_timeout='1s'; +set statement_timeout = null ; NEW_CONNECTION; -set statement_timeout='1s'; +set statement_timeout = null ; NEW_CONNECTION; set -statement_timeout='1s'; +statement_timeout += +null +; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set statement_timeout='1s'; +foo set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s' bar; +set statement_timeout = null bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set statement_timeout='1s'; +%set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'%; +set statement_timeout = null %; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%statement_timeout='1s'; +set statement_timeout = null%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set statement_timeout='1s'; +_set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'_; +set statement_timeout = null _; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_statement_timeout='1s'; +set statement_timeout = null_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set statement_timeout='1s'; +&set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'&; +set statement_timeout = null &; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&statement_timeout='1s'; +set statement_timeout = null&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set statement_timeout='1s'; +$set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'$; +set statement_timeout = null $; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set$statement_timeout='1s'; +set statement_timeout = null$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set statement_timeout='1s'; +@set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'@; +set statement_timeout = null @; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@statement_timeout='1s'; +set statement_timeout = null@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set statement_timeout='1s'; +!set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'!; +set statement_timeout = null !; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!statement_timeout='1s'; +set statement_timeout = null!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set statement_timeout='1s'; +*set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'*; +set statement_timeout = null *; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*statement_timeout='1s'; +set statement_timeout = null*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set statement_timeout='1s'; +(set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'(; +set statement_timeout = null (; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(statement_timeout='1s'; +set statement_timeout = null(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set statement_timeout='1s'; +)set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'); +set statement_timeout = null ); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)statement_timeout='1s'; +set statement_timeout = null); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set statement_timeout='1s'; +-set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'-; +set statement_timeout = 
null -; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-statement_timeout='1s'; +set statement_timeout = null-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set statement_timeout='1s'; ++set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'+; +set statement_timeout = null +; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+statement_timeout='1s'; +set statement_timeout = null+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set statement_timeout='1s'; +-#set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'-#; +set statement_timeout = null -#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#statement_timeout='1s'; +set statement_timeout = null-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set statement_timeout='1s'; +/set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'/; +set statement_timeout = null /; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/statement_timeout='1s'; +set statement_timeout = null/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set statement_timeout='1s'; +\set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'\; +set statement_timeout = null \; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\statement_timeout='1s'; +set statement_timeout = null\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set statement_timeout='1s'; +?set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'?; +set statement_timeout = null ?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?statement_timeout='1s'; +set statement_timeout = null?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set statement_timeout='1s'; +-/set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
statement_timeout='1s'-/; +set statement_timeout = null -/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/statement_timeout='1s'; +set statement_timeout = null-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set statement_timeout='1s'; +/#set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'/#; +set statement_timeout = null /#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#statement_timeout='1s'; +set statement_timeout = null/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set statement_timeout='1s'; +/-set statement_timeout = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='1s'/-; +set statement_timeout = null /-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-statement_timeout='1s'; +set statement_timeout = null/-; NEW_CONNECTION; -set statement_timeout='100ms'; +set statement_timeout='1s'; NEW_CONNECTION; -SET STATEMENT_TIMEOUT='100MS'; +SET STATEMENT_TIMEOUT='1S'; NEW_CONNECTION; -set statement_timeout='100ms'; +set statement_timeout='1s'; NEW_CONNECTION; - set statement_timeout='100ms'; + set statement_timeout='1s'; NEW_CONNECTION; - set statement_timeout='100ms'; + set statement_timeout='1s'; NEW_CONNECTION; -set statement_timeout='100ms'; +set statement_timeout='1s'; NEW_CONNECTION; -set statement_timeout='100ms' ; +set statement_timeout='1s' ; NEW_CONNECTION; -set statement_timeout='100ms' ; +set statement_timeout='1s' ; NEW_CONNECTION; -set statement_timeout='100ms' +set statement_timeout='1s' ; NEW_CONNECTION; -set statement_timeout='100ms'; +set statement_timeout='1s'; NEW_CONNECTION; -set statement_timeout='100ms'; +set statement_timeout='1s'; NEW_CONNECTION; set -statement_timeout='100ms'; +statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set statement_timeout='100ms'; +foo set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms' 
bar; +set statement_timeout='1s' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set statement_timeout='100ms'; +%set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'%; +set statement_timeout='1s'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%statement_timeout='100ms'; +set%statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set statement_timeout='100ms'; +_set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'_; +set statement_timeout='1s'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_statement_timeout='100ms'; +set_statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set statement_timeout='100ms'; +&set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'&; +set statement_timeout='1s'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&statement_timeout='100ms'; +set&statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set statement_timeout='100ms'; +$set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'$; +set statement_timeout='1s'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$statement_timeout='100ms'; +set$statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set statement_timeout='100ms'; +@set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'@; +set statement_timeout='1s'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@statement_timeout='100ms'; +set@statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set statement_timeout='100ms'; +!set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'!; +set statement_timeout='1s'!; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set!statement_timeout='100ms'; +set!statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set statement_timeout='100ms'; +*set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'*; +set statement_timeout='1s'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*statement_timeout='100ms'; +set*statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set statement_timeout='100ms'; +(set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'(; +set statement_timeout='1s'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(statement_timeout='100ms'; +set(statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set statement_timeout='100ms'; +)set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'); +set statement_timeout='1s'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)statement_timeout='100ms'; +set)statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set statement_timeout='100ms'; +-set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'-; +set statement_timeout='1s'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-statement_timeout='100ms'; +set-statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set statement_timeout='100ms'; ++set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'+; +set statement_timeout='1s'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+statement_timeout='100ms'; +set+statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set statement_timeout='100ms'; +-#set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'-#; +set 
statement_timeout='1s'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#statement_timeout='100ms'; +set-#statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set statement_timeout='100ms'; +/set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'/; +set statement_timeout='1s'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/statement_timeout='100ms'; +set/statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set statement_timeout='100ms'; +\set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'\; +set statement_timeout='1s'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\statement_timeout='100ms'; +set\statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set statement_timeout='100ms'; +?set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'?; +set statement_timeout='1s'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?statement_timeout='100ms'; +set?statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set statement_timeout='100ms'; +-/set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'-/; +set statement_timeout='1s'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/statement_timeout='100ms'; +set-/statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set statement_timeout='100ms'; +/#set statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'/#; +set statement_timeout='1s'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#statement_timeout='100ms'; +set/#statement_timeout='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set statement_timeout='100ms'; +/-set statement_timeout='1s'; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set statement_timeout='100ms'/-; +set statement_timeout='1s'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-statement_timeout='100ms'; +set/-statement_timeout='1s'; NEW_CONNECTION; -set statement_timeout='10000us'; +set statement_timeout = '1s' ; NEW_CONNECTION; -SET STATEMENT_TIMEOUT='10000US'; +SET STATEMENT_TIMEOUT = '1S' ; NEW_CONNECTION; -set statement_timeout='10000us'; +set statement_timeout = '1s' ; NEW_CONNECTION; - set statement_timeout='10000us'; + set statement_timeout = '1s' ; NEW_CONNECTION; - set statement_timeout='10000us'; + set statement_timeout = '1s' ; NEW_CONNECTION; -set statement_timeout='10000us'; +set statement_timeout = '1s' ; NEW_CONNECTION; -set statement_timeout='10000us' ; +set statement_timeout = '1s' ; NEW_CONNECTION; -set statement_timeout='10000us' ; +set statement_timeout = '1s' ; NEW_CONNECTION; -set statement_timeout='10000us' +set statement_timeout = '1s' ; NEW_CONNECTION; -set statement_timeout='10000us'; +set statement_timeout = '1s' ; NEW_CONNECTION; -set statement_timeout='10000us'; +set statement_timeout = '1s' ; NEW_CONNECTION; set -statement_timeout='10000us'; +statement_timeout += +'1s' +; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set statement_timeout='10000us'; +foo set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us' bar; +set statement_timeout = '1s' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set statement_timeout='10000us'; +%set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'%; +set statement_timeout = '1s' %; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%statement_timeout='10000us'; +set statement_timeout = '1s'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set statement_timeout='10000us'; +_set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
statement_timeout='10000us'_; +set statement_timeout = '1s' _; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_statement_timeout='10000us'; +set statement_timeout = '1s'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set statement_timeout='10000us'; +&set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'&; +set statement_timeout = '1s' &; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&statement_timeout='10000us'; +set statement_timeout = '1s'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set statement_timeout='10000us'; +$set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'$; +set statement_timeout = '1s' $; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$statement_timeout='10000us'; +set statement_timeout = '1s'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set statement_timeout='10000us'; +@set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'@; +set statement_timeout = '1s' @; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@statement_timeout='10000us'; +set statement_timeout = '1s'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set statement_timeout='10000us'; +!set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'!; +set statement_timeout = '1s' !; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!statement_timeout='10000us'; +set statement_timeout = '1s'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set statement_timeout='10000us'; +*set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'*; +set statement_timeout = '1s' *; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*statement_timeout='10000us'; +set statement_timeout = '1s'*; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -(set statement_timeout='10000us'; +(set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'(; +set statement_timeout = '1s' (; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(statement_timeout='10000us'; +set statement_timeout = '1s'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set statement_timeout='10000us'; +)set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'); +set statement_timeout = '1s' ); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)statement_timeout='10000us'; +set statement_timeout = '1s'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set statement_timeout='10000us'; +-set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'-; +set statement_timeout = '1s' -; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-statement_timeout='10000us'; +set statement_timeout = '1s'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set statement_timeout='10000us'; ++set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'+; +set statement_timeout = '1s' +; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+statement_timeout='10000us'; +set statement_timeout = '1s'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set statement_timeout='10000us'; +-#set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'-#; +set statement_timeout = '1s' -#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#statement_timeout='10000us'; +set statement_timeout = '1s'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set statement_timeout='10000us'; +/set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'/; +set statement_timeout = '1s' /; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/statement_timeout='10000us'; +set statement_timeout = '1s'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set statement_timeout='10000us'; +\set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'\; +set statement_timeout = '1s' \; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\statement_timeout='10000us'; +set statement_timeout = '1s'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set statement_timeout='10000us'; +?set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'?; +set statement_timeout = '1s' ?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?statement_timeout='10000us'; +set statement_timeout = '1s'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set statement_timeout='10000us'; +-/set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'-/; +set statement_timeout = '1s' -/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/statement_timeout='10000us'; +set statement_timeout = '1s'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set statement_timeout='10000us'; +/#set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'/#; +set statement_timeout = '1s' /#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#statement_timeout='10000us'; +set statement_timeout = '1s'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set statement_timeout='10000us'; +/-set statement_timeout = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='10000us'/-; +set statement_timeout = '1s' /-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-statement_timeout='10000us'; +set statement_timeout = '1s'/-; NEW_CONNECTION; -set statement_timeout='9223372036854775807ns'; +set 
statement_timeout=100; NEW_CONNECTION; -SET STATEMENT_TIMEOUT='9223372036854775807NS'; +SET STATEMENT_TIMEOUT=100; NEW_CONNECTION; -set statement_timeout='9223372036854775807ns'; +set statement_timeout=100; NEW_CONNECTION; - set statement_timeout='9223372036854775807ns'; + set statement_timeout=100; NEW_CONNECTION; - set statement_timeout='9223372036854775807ns'; + set statement_timeout=100; NEW_CONNECTION; -set statement_timeout='9223372036854775807ns'; +set statement_timeout=100; NEW_CONNECTION; -set statement_timeout='9223372036854775807ns' ; +set statement_timeout=100 ; NEW_CONNECTION; -set statement_timeout='9223372036854775807ns' ; +set statement_timeout=100 ; NEW_CONNECTION; -set statement_timeout='9223372036854775807ns' +set statement_timeout=100 ; NEW_CONNECTION; -set statement_timeout='9223372036854775807ns'; +set statement_timeout=100; NEW_CONNECTION; -set statement_timeout='9223372036854775807ns'; +set statement_timeout=100; NEW_CONNECTION; set -statement_timeout='9223372036854775807ns'; +statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set statement_timeout='9223372036854775807ns'; +foo set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns' bar; +set statement_timeout=100 bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set statement_timeout='9223372036854775807ns'; +%set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'%; +set statement_timeout=100%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%statement_timeout='9223372036854775807ns'; +set%statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set statement_timeout='9223372036854775807ns'; +_set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'_; +set statement_timeout=100_; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set_statement_timeout='9223372036854775807ns'; +set_statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set statement_timeout='9223372036854775807ns'; +&set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'&; +set statement_timeout=100&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&statement_timeout='9223372036854775807ns'; +set&statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set statement_timeout='9223372036854775807ns'; +$set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'$; +set statement_timeout=100$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$statement_timeout='9223372036854775807ns'; +set$statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set statement_timeout='9223372036854775807ns'; +@set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'@; +set statement_timeout=100@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@statement_timeout='9223372036854775807ns'; +set@statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set statement_timeout='9223372036854775807ns'; +!set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'!; +set statement_timeout=100!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!statement_timeout='9223372036854775807ns'; +set!statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set statement_timeout='9223372036854775807ns'; +*set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'*; +set statement_timeout=100*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*statement_timeout='9223372036854775807ns'; 
+set*statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set statement_timeout='9223372036854775807ns'; +(set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'(; +set statement_timeout=100(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(statement_timeout='9223372036854775807ns'; +set(statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set statement_timeout='9223372036854775807ns'; +)set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'); +set statement_timeout=100); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)statement_timeout='9223372036854775807ns'; +set)statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set statement_timeout='9223372036854775807ns'; +-set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'-; +set statement_timeout=100-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-statement_timeout='9223372036854775807ns'; +set-statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set statement_timeout='9223372036854775807ns'; ++set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'+; +set statement_timeout=100+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+statement_timeout='9223372036854775807ns'; +set+statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set statement_timeout='9223372036854775807ns'; +-#set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'-#; +set statement_timeout=100-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#statement_timeout='9223372036854775807ns'; +set-#statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -/set statement_timeout='9223372036854775807ns'; +/set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'/; +set statement_timeout=100/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/statement_timeout='9223372036854775807ns'; +set/statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set statement_timeout='9223372036854775807ns'; +\set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'\; +set statement_timeout=100\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\statement_timeout='9223372036854775807ns'; +set\statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set statement_timeout='9223372036854775807ns'; +?set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'?; +set statement_timeout=100?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?statement_timeout='9223372036854775807ns'; +set?statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set statement_timeout='9223372036854775807ns'; +-/set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'-/; +set statement_timeout=100-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/statement_timeout='9223372036854775807ns'; +set-/statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set statement_timeout='9223372036854775807ns'; +/#set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'/#; +set statement_timeout=100/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#statement_timeout='9223372036854775807ns'; +set/#statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set 
statement_timeout='9223372036854775807ns'; +/-set statement_timeout=100; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_timeout='9223372036854775807ns'/-; +set statement_timeout=100/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-statement_timeout='9223372036854775807ns'; +set/-statement_timeout=100; NEW_CONNECTION; -set autocommit = false; -set transaction read only; +set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; -SET TRANSACTION READ ONLY; +SET STATEMENT_TIMEOUT = 100 ; NEW_CONNECTION; -set autocommit = false; -set transaction read only; +set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; - set transaction read only; + set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; - set transaction read only; + set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; -set transaction read only; +set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; -set transaction read only ; +set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; -set transaction read only ; +set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; -set transaction read only +set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; -set transaction read only; +set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; -set transaction read only; +set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; set -transaction -read -only; +statement_timeout += +100 +; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set transaction read only; +foo set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only bar; +set statement_timeout = 100 bar; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set transaction read only; +%set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = 
false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only%; +set statement_timeout = 100 %; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read%only; +set statement_timeout = 100%; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set transaction read only; +_set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only_; +set statement_timeout = 100 _; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read_only; +set statement_timeout = 100_; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set transaction read only; +&set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only&; +set statement_timeout = 100 &; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read&only; +set statement_timeout = 100&; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set transaction read only; +$set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only$; +set statement_timeout = 100 $; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read$only; +set statement_timeout = 100$; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set transaction read only; +@set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only@; +set statement_timeout = 100 @; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read@only; +set statement_timeout = 100@; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set transaction read only; +!set 
statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only!; +set statement_timeout = 100 !; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read!only; +set statement_timeout = 100!; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set transaction read only; +*set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only*; +set statement_timeout = 100 *; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read*only; +set statement_timeout = 100*; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set transaction read only; +(set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only(; +set statement_timeout = 100 (; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read(only; +set statement_timeout = 100(; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set transaction read only; +)set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only); +set statement_timeout = 100 ); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read)only; +set statement_timeout = 100); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set transaction read only; +-set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only-; +set statement_timeout = 100 -; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read-only; +set statement_timeout = 100-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION 
INVALID_ARGUMENT -+set transaction read only; ++set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only+; +set statement_timeout = 100 +; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read+only; +set statement_timeout = 100+; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set transaction read only; +-#set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only-#; +set statement_timeout = 100 -#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read-#only; +set statement_timeout = 100-#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set transaction read only; +/set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only/; +set statement_timeout = 100 /; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read/only; +set statement_timeout = 100/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set transaction read only; +\set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only\; +set statement_timeout = 100 \; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read\only; +set statement_timeout = 100\; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set transaction read only; +?set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only?; +set statement_timeout = 100 ?; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read?only; +set statement_timeout = 100?; 
NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set transaction read only; +-/set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only-/; +set statement_timeout = 100 -/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read-/only; +set statement_timeout = 100-/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set transaction read only; +/#set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only/#; +set statement_timeout = 100 /#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read/#only; +set statement_timeout = 100/#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set transaction read only; +/-set statement_timeout = 100 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read only/-; +set statement_timeout = 100 /-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read/-only; +set statement_timeout = 100/-; NEW_CONNECTION; -set autocommit = false; -set transaction read write; +set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; -SET TRANSACTION READ WRITE; +SET STATEMENT_TIMEOUT='100MS'; NEW_CONNECTION; -set autocommit = false; -set transaction read write; +set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; - set transaction read write; + set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; - set transaction read write; + set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; -set transaction read write; +set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; -set transaction read write ; +set statement_timeout='100ms' ; NEW_CONNECTION; -set autocommit = 
false; -set transaction read write ; +set statement_timeout='100ms' ; NEW_CONNECTION; -set autocommit = false; -set transaction read write +set statement_timeout='100ms' ; NEW_CONNECTION; -set autocommit = false; -set transaction read write; +set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; -set transaction read write; +set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; set -transaction -read -write; +statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set transaction read write; +foo set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write bar; +set statement_timeout='100ms' bar; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set transaction read write; +%set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write%; +set statement_timeout='100ms'%; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read%write; +set%statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set transaction read write; +_set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write_; +set statement_timeout='100ms'_; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read_write; +set_statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set transaction read write; +&set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write&; +set statement_timeout='100ms'&; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read&write; 
+set&statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set transaction read write; +$set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write$; +set statement_timeout='100ms'$; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read$write; +set$statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set transaction read write; +@set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write@; +set statement_timeout='100ms'@; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read@write; +set@statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set transaction read write; +!set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write!; +set statement_timeout='100ms'!; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read!write; +set!statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set transaction read write; +*set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write*; +set statement_timeout='100ms'*; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read*write; +set*statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set transaction read write; +(set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write(; +set statement_timeout='100ms'(; NEW_CONNECTION; -set 
autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read(write; +set(statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set transaction read write; +)set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write); +set statement_timeout='100ms'); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read)write; +set)statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set transaction read write; +-set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write-; +set statement_timeout='100ms'-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read-write; +set-statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set transaction read write; ++set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write+; +set statement_timeout='100ms'+; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read+write; +set+statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set transaction read write; +-#set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write-#; +set statement_timeout='100ms'-#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read-#write; +set-#statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set transaction read write; +/set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set transaction read write/; +set statement_timeout='100ms'/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read/write; +set/statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set transaction read write; +\set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write\; +set statement_timeout='100ms'\; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read\write; +set\statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set transaction read write; +?set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write?; +set statement_timeout='100ms'?; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read?write; +set?statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set transaction read write; +-/set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write-/; +set statement_timeout='100ms'-/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read-/write; +set-/statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set transaction read write; +/#set statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write/#; +set statement_timeout='100ms'/#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read/#write; +set/#statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set transaction read write; +/-set 
statement_timeout='100ms'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read write/-; +set statement_timeout='100ms'/-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction read/-write; +set/-statement_timeout='100ms'; NEW_CONNECTION; -set read_only_staleness='STRONG'; +set statement_timeout='10000us'; NEW_CONNECTION; -SET READ_ONLY_STALENESS='STRONG'; +SET STATEMENT_TIMEOUT='10000US'; NEW_CONNECTION; -set read_only_staleness='strong'; +set statement_timeout='10000us'; NEW_CONNECTION; - set read_only_staleness='STRONG'; + set statement_timeout='10000us'; NEW_CONNECTION; - set read_only_staleness='STRONG'; + set statement_timeout='10000us'; NEW_CONNECTION; -set read_only_staleness='STRONG'; +set statement_timeout='10000us'; NEW_CONNECTION; -set read_only_staleness='STRONG' ; +set statement_timeout='10000us' ; NEW_CONNECTION; -set read_only_staleness='STRONG' ; +set statement_timeout='10000us' ; NEW_CONNECTION; -set read_only_staleness='STRONG' +set statement_timeout='10000us' ; NEW_CONNECTION; -set read_only_staleness='STRONG'; +set statement_timeout='10000us'; NEW_CONNECTION; -set read_only_staleness='STRONG'; +set statement_timeout='10000us'; NEW_CONNECTION; set -read_only_staleness='STRONG'; +statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set read_only_staleness='STRONG'; +foo set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG' bar; +set statement_timeout='10000us' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set read_only_staleness='STRONG'; +%set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'%; +set statement_timeout='10000us'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%read_only_staleness='STRONG'; +set%statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -_set read_only_staleness='STRONG'; +_set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'_; +set statement_timeout='10000us'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_read_only_staleness='STRONG'; +set_statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set read_only_staleness='STRONG'; +&set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'&; +set statement_timeout='10000us'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&read_only_staleness='STRONG'; +set&statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set read_only_staleness='STRONG'; +$set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'$; +set statement_timeout='10000us'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$read_only_staleness='STRONG'; +set$statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set read_only_staleness='STRONG'; +@set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'@; +set statement_timeout='10000us'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@read_only_staleness='STRONG'; +set@statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set read_only_staleness='STRONG'; +!set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'!; +set statement_timeout='10000us'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!read_only_staleness='STRONG'; +set!statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set read_only_staleness='STRONG'; +*set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'*; +set 
statement_timeout='10000us'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*read_only_staleness='STRONG'; +set*statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set read_only_staleness='STRONG'; +(set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'(; +set statement_timeout='10000us'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(read_only_staleness='STRONG'; +set(statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set read_only_staleness='STRONG'; +)set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'); +set statement_timeout='10000us'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)read_only_staleness='STRONG'; +set)statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set read_only_staleness='STRONG'; +-set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'-; +set statement_timeout='10000us'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-read_only_staleness='STRONG'; +set-statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set read_only_staleness='STRONG'; ++set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'+; +set statement_timeout='10000us'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+read_only_staleness='STRONG'; +set+statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set read_only_staleness='STRONG'; +-#set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'-#; +set statement_timeout='10000us'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#read_only_staleness='STRONG'; +set-#statement_timeout='10000us'; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -/set read_only_staleness='STRONG'; +/set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'/; +set statement_timeout='10000us'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/read_only_staleness='STRONG'; +set/statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set read_only_staleness='STRONG'; +\set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'\; +set statement_timeout='10000us'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\read_only_staleness='STRONG'; +set\statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set read_only_staleness='STRONG'; +?set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'?; +set statement_timeout='10000us'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?read_only_staleness='STRONG'; +set?statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set read_only_staleness='STRONG'; +-/set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'-/; +set statement_timeout='10000us'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/read_only_staleness='STRONG'; +set-/statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set read_only_staleness='STRONG'; +/#set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='STRONG'/#; +set statement_timeout='10000us'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#read_only_staleness='STRONG'; +set/#statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set read_only_staleness='STRONG'; +/-set statement_timeout='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
read_only_staleness='STRONG'/-; +set statement_timeout='10000us'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-read_only_staleness='STRONG'; +set/-statement_timeout='10000us'; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +SET STATEMENT_TIMEOUT='9223372036854775807NS'; NEW_CONNECTION; -set read_only_staleness='min_read_timestamp 2018-01-02t03:04:05.123-08:00'; +set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; - set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; + set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; - set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; + set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' ; +set statement_timeout='9223372036854775807ns' ; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' ; +set statement_timeout='9223372036854775807ns' ; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' +set statement_timeout='9223372036854775807ns' ; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; set -read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set read_only_staleness='MIN_READ_TIMESTAMP 
2018-01-02T03:04:05.123-08:00'; +foo set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' bar; +set statement_timeout='9223372036854775807ns' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +%set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'%; +set statement_timeout='9223372036854775807ns'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP%2018-01-02T03:04:05.123-08:00'; +set%statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +_set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'_; +set statement_timeout='9223372036854775807ns'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP_2018-01-02T03:04:05.123-08:00'; +set_statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +&set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'&; +set statement_timeout='9223372036854775807ns'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP&2018-01-02T03:04:05.123-08:00'; +set&statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +$set 
statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'$; +set statement_timeout='9223372036854775807ns'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP$2018-01-02T03:04:05.123-08:00'; +set$statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +@set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'@; +set statement_timeout='9223372036854775807ns'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP@2018-01-02T03:04:05.123-08:00'; +set@statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +!set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'!; +set statement_timeout='9223372036854775807ns'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP!2018-01-02T03:04:05.123-08:00'; +set!statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +*set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'*; +set statement_timeout='9223372036854775807ns'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP*2018-01-02T03:04:05.123-08:00'; +set*statement_timeout='9223372036854775807ns'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +(set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'(; +set statement_timeout='9223372036854775807ns'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP(2018-01-02T03:04:05.123-08:00'; +set(statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +)set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'); +set statement_timeout='9223372036854775807ns'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP)2018-01-02T03:04:05.123-08:00'; +set)statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +-set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'-; +set statement_timeout='9223372036854775807ns'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP-2018-01-02T03:04:05.123-08:00'; +set-statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; ++set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'+; +set statement_timeout='9223372036854775807ns'+; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP+2018-01-02T03:04:05.123-08:00'; +set+statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +-#set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'-#; +set statement_timeout='9223372036854775807ns'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP-#2018-01-02T03:04:05.123-08:00'; +set-#statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +/set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'/; +set statement_timeout='9223372036854775807ns'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP/2018-01-02T03:04:05.123-08:00'; +set/statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +\set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'\; +set statement_timeout='9223372036854775807ns'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP\2018-01-02T03:04:05.123-08:00'; +set\statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +?set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'?; +set statement_timeout='9223372036854775807ns'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP?2018-01-02T03:04:05.123-08:00'; +set?statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +-/set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'-/; +set statement_timeout='9223372036854775807ns'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP-/2018-01-02T03:04:05.123-08:00'; +set-/statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +/#set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'/#; +set statement_timeout='9223372036854775807ns'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP/#2018-01-02T03:04:05.123-08:00'; +set/#statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; +/-set statement_timeout='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'/-; +set statement_timeout='9223372036854775807ns'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP/-2018-01-02T03:04:05.123-08:00'; +set/-statement_timeout='9223372036854775807ns'; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; 
+set autocommit = false; +set transaction read only; NEW_CONNECTION; -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +set autocommit = false; +SET TRANSACTION READ ONLY; NEW_CONNECTION; -set read_only_staleness='min_read_timestamp 2018-01-02t03:04:05.123z'; +set autocommit = false; +set transaction read only; NEW_CONNECTION; - set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +set autocommit = false; + set transaction read only; NEW_CONNECTION; - set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +set autocommit = false; + set transaction read only; NEW_CONNECTION; +set autocommit = false; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +set transaction read only; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' ; +set autocommit = false; +set transaction read only ; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' ; +set autocommit = false; +set transaction read only ; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' +set autocommit = false; +set transaction read only ; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +set autocommit = false; +set transaction read only; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +set autocommit = false; +set transaction read only; NEW_CONNECTION; +set autocommit = false; set -read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +transaction +read +only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +foo set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' bar; +set transaction read only bar; NEW_CONNECTION; +set 
autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +%set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'%; +set transaction read only%; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP%2018-01-02T03:04:05.123Z'; +set transaction read%only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +_set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'_; +set transaction read only_; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP_2018-01-02T03:04:05.123Z'; +set transaction read_only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +&set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'&; +set transaction read only&; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP&2018-01-02T03:04:05.123Z'; +set transaction read&only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +$set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'$; +set transaction read only$; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP$2018-01-02T03:04:05.123Z'; +set transaction read$only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +@set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'@; +set transaction read only@; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP@2018-01-02T03:04:05.123Z'; +set transaction read@only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +!set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'!; +set transaction read only!; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP!2018-01-02T03:04:05.123Z'; +set transaction read!only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +*set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'*; +set transaction read only*; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP*2018-01-02T03:04:05.123Z'; +set transaction read*only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +(set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'(; +set transaction read only(; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP(2018-01-02T03:04:05.123Z'; +set transaction read(only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +)set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'); +set transaction read only); NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP)2018-01-02T03:04:05.123Z'; +set transaction read)only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +-set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'-; +set transaction read only-; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP-2018-01-02T03:04:05.123Z'; +set transaction read-only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; ++set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'+; +set transaction read only+; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP+2018-01-02T03:04:05.123Z'; +set transaction read+only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set read_only_staleness='MIN_READ_TIMESTAMP 
2018-01-02T03:04:05.123Z'; +-#set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'-#; +set transaction read only-#; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP-#2018-01-02T03:04:05.123Z'; +set transaction read-#only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +/set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'/; +set transaction read only/; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP/2018-01-02T03:04:05.123Z'; +set transaction read/only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +\set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'\; +set transaction read only\; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP\2018-01-02T03:04:05.123Z'; +set transaction read\only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +?set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'?; +set transaction read only?; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP?2018-01-02T03:04:05.123Z'; +set transaction 
read?only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +-/set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'-/; +set transaction read only-/; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP-/2018-01-02T03:04:05.123Z'; +set transaction read-/only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +/#set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'/#; +set transaction read only/#; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP/#2018-01-02T03:04:05.123Z'; +set transaction read/#only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; +/-set transaction read only; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'/-; +set transaction read only/-; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP/-2018-01-02T03:04:05.123Z'; +set transaction read/-only; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +set autocommit = false; +set transaction read write; NEW_CONNECTION; -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +set autocommit = false; +SET TRANSACTION READ WRITE; NEW_CONNECTION; -set read_only_staleness='min_read_timestamp 
2018-01-02t03:04:05.123+07:45'; +set autocommit = false; +set transaction read write; NEW_CONNECTION; - set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +set autocommit = false; + set transaction read write; NEW_CONNECTION; - set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +set autocommit = false; + set transaction read write; NEW_CONNECTION; +set autocommit = false; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +set transaction read write; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' ; +set autocommit = false; +set transaction read write ; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' ; +set autocommit = false; +set transaction read write ; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' +set autocommit = false; +set transaction read write ; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +set autocommit = false; +set transaction read write; NEW_CONNECTION; -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +set autocommit = false; +set transaction read write; NEW_CONNECTION; +set autocommit = false; set -read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +transaction +read +write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +foo set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' bar; +set transaction read write bar; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +%set transaction read write; NEW_CONNECTION; +set 
autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'%; +set transaction read write%; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP%2018-01-02T03:04:05.123+07:45'; +set transaction read%write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +_set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'_; +set transaction read write_; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP_2018-01-02T03:04:05.123+07:45'; +set transaction read_write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +&set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'&; +set transaction read write&; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP&2018-01-02T03:04:05.123+07:45'; +set transaction read&write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +$set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'$; +set transaction read write$; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP$2018-01-02T03:04:05.123+07:45'; +set transaction read$write; 
NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +@set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'@; +set transaction read write@; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP@2018-01-02T03:04:05.123+07:45'; +set transaction read@write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +!set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'!; +set transaction read write!; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP!2018-01-02T03:04:05.123+07:45'; +set transaction read!write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +*set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'*; +set transaction read write*; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP*2018-01-02T03:04:05.123+07:45'; +set transaction read*write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +(set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'(; +set 
transaction read write(; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP(2018-01-02T03:04:05.123+07:45'; +set transaction read(write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +)set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'); +set transaction read write); NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP)2018-01-02T03:04:05.123+07:45'; +set transaction read)write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +-set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'-; +set transaction read write-; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP-2018-01-02T03:04:05.123+07:45'; +set transaction read-write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; ++set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'+; +set transaction read write+; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP+2018-01-02T03:04:05.123+07:45'; +set transaction read+write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set read_only_staleness='MIN_READ_TIMESTAMP 
2018-01-02T03:04:05.123+07:45'; +-#set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'-#; +set transaction read write-#; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP-#2018-01-02T03:04:05.123+07:45'; +set transaction read-#write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +/set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'/; +set transaction read write/; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP/2018-01-02T03:04:05.123+07:45'; +set transaction read/write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +\set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'\; +set transaction read write\; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP\2018-01-02T03:04:05.123+07:45'; +set transaction read\write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +?set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'?; +set transaction read write?; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
read_only_staleness='MIN_READ_TIMESTAMP?2018-01-02T03:04:05.123+07:45'; +set transaction read?write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +-/set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'-/; +set transaction read write-/; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP-/2018-01-02T03:04:05.123+07:45'; +set transaction read-/write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +/#set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'/#; +set transaction read write/#; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP/#2018-01-02T03:04:05.123+07:45'; +set transaction read/#write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; +/-set transaction read write; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'/-; +set transaction read write/-; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MIN_READ_TIMESTAMP/-2018-01-02T03:04:05.123+07:45'; +set transaction read/-write; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +set read_only_staleness='STRONG'; NEW_CONNECTION; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +SET 
READ_ONLY_STALENESS='STRONG'; NEW_CONNECTION; -set read_only_staleness='read_timestamp 2018-01-02t03:04:05.54321-07:00'; +set read_only_staleness='strong'; NEW_CONNECTION; - set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; + set read_only_staleness='STRONG'; NEW_CONNECTION; - set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; + set read_only_staleness='STRONG'; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +set read_only_staleness='STRONG'; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' ; +set read_only_staleness='STRONG' ; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' ; +set read_only_staleness='STRONG' ; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' +set read_only_staleness='STRONG' ; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +set read_only_staleness='STRONG'; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +set read_only_staleness='STRONG'; NEW_CONNECTION; set -read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +foo set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' bar; +set read_only_staleness='STRONG' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +%set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'%; +set read_only_staleness='STRONG'%; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP%2018-01-02T03:04:05.54321-07:00'; +set%read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +_set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'_; +set read_only_staleness='STRONG'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP_2018-01-02T03:04:05.54321-07:00'; +set_read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +&set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'&; +set read_only_staleness='STRONG'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP&2018-01-02T03:04:05.54321-07:00'; +set&read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +$set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'$; +set read_only_staleness='STRONG'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP$2018-01-02T03:04:05.54321-07:00'; +set$read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +@set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'@; +set read_only_staleness='STRONG'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
read_only_staleness='READ_TIMESTAMP@2018-01-02T03:04:05.54321-07:00'; +set@read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +!set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'!; +set read_only_staleness='STRONG'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP!2018-01-02T03:04:05.54321-07:00'; +set!read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +*set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'*; +set read_only_staleness='STRONG'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP*2018-01-02T03:04:05.54321-07:00'; +set*read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +(set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'(; +set read_only_staleness='STRONG'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP(2018-01-02T03:04:05.54321-07:00'; +set(read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +)set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'); +set read_only_staleness='STRONG'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
read_only_staleness='READ_TIMESTAMP)2018-01-02T03:04:05.54321-07:00'; +set)read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +-set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'-; +set read_only_staleness='STRONG'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP-2018-01-02T03:04:05.54321-07:00'; +set-read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; ++set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'+; +set read_only_staleness='STRONG'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP+2018-01-02T03:04:05.54321-07:00'; +set+read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +-#set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'-#; +set read_only_staleness='STRONG'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP-#2018-01-02T03:04:05.54321-07:00'; +set-#read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +/set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'/; +set read_only_staleness='STRONG'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
read_only_staleness='READ_TIMESTAMP/2018-01-02T03:04:05.54321-07:00'; +set/read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +\set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'\; +set read_only_staleness='STRONG'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP\2018-01-02T03:04:05.54321-07:00'; +set\read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +?set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'?; +set read_only_staleness='STRONG'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP?2018-01-02T03:04:05.54321-07:00'; +set?read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +-/set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'-/; +set read_only_staleness='STRONG'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP-/2018-01-02T03:04:05.54321-07:00'; +set-/read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +/#set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'/#; +set read_only_staleness='STRONG'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
read_only_staleness='READ_TIMESTAMP/#2018-01-02T03:04:05.54321-07:00'; +set/#read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; +/-set read_only_staleness='STRONG'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'/-; +set read_only_staleness='STRONG'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP/-2018-01-02T03:04:05.54321-07:00'; +set/-read_only_staleness='STRONG'; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; -set read_only_staleness='read_timestamp 2018-01-02t03:04:05.54321z'; +set read_only_staleness='min_read_timestamp 2018-01-02t03:04:05.123-08:00'; NEW_CONNECTION; - set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; + set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; - set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; + set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' ; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' ; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' ; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' ; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' 
+set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' ; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; set -read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +foo set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' bar; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +%set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'%; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP%2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP%2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +_set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'_; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'_; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP_2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP_2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +&set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'&; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP&2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP&2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +$set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'$; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP$2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP$2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +@set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'@; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP@2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP@2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -!set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +!set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'!; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP!2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP!2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +*set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'*; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP*2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP*2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +(set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'(; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP(2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP(2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +)set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'); +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP)2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP)2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +-set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'-; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP-2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP-2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; ++set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'+; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP+2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP+2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +-#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'-#; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'-#; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP-#2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP-#2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'/; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP/2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP/2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +\set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'\; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP\2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP\2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +?set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'?; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP?2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP?2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT --/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +-/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'-/; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP-/2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP-/2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +/#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'/#; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP/#2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP/#2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; +/-set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'/-; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123-08:00'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP/-2018-01-02T03:04:05.54321Z'; +set read_only_staleness='MIN_READ_TIMESTAMP/-2018-01-02T03:04:05.123-08:00'; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; -SET 
READ_ONLY_STALENESS='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; -set read_only_staleness='read_timestamp 2018-01-02t03:04:05.54321+05:30'; +set read_only_staleness='min_read_timestamp 2018-01-02t03:04:05.123z'; NEW_CONNECTION; - set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; + set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; - set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; + set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' ; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' ; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' ; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' ; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' ; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; set -read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +foo set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; 
@EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' bar; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +%set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'%; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP%2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP%2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +_set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'_; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP_2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP_2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +&set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'&; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP&2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP&2018-01-02T03:04:05.123Z'; NEW_CONNECTION; 
@EXPECT EXCEPTION INVALID_ARGUMENT -$set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +$set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'$; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP$2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP$2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +@set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'@; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP@2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP@2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +!set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'!; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP!2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP!2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +*set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; 
@EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'*; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP*2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP*2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +(set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'(; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP(2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP(2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +)set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'); +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP)2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP)2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +-set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'-; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'-; NEW_CONNECTION; 
@EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP-2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP-2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; ++set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'+; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP+2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP+2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +-#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'-#; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP-#2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP-#2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'/; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP/2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP/2018-01-02T03:04:05.123Z'; NEW_CONNECTION; 
@EXPECT EXCEPTION INVALID_ARGUMENT -\set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +\set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'\; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP\2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP\2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +?set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'?; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP?2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP?2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +-/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'-/; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP-/2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP-/2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +/#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'/#; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP/#2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP/#2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; +/-set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'/-; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123Z'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='READ_TIMESTAMP/-2018-01-02T03:04:05.54321+05:30'; +set read_only_staleness='MIN_READ_TIMESTAMP/-2018-01-02T03:04:05.123Z'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 12s'; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; -SET READ_ONLY_STALENESS='MAX_STALENESS 12S'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; -set read_only_staleness='max_staleness 12s'; +set read_only_staleness='min_read_timestamp 2018-01-02t03:04:05.123+07:45'; NEW_CONNECTION; - set read_only_staleness='MAX_STALENESS 12s'; + set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; - set read_only_staleness='MAX_STALENESS 12s'; + set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 12s'; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 12s' ; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' ; 
NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 12s' ; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' ; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 12s' +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' ; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 12s'; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 12s'; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; set -read_only_staleness='MAX_STALENESS 12s'; +read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set read_only_staleness='MAX_STALENESS 12s'; +foo set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s' bar; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set read_only_staleness='MAX_STALENESS 12s'; +%set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'%; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS%12s'; +set read_only_staleness='MIN_READ_TIMESTAMP%2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set read_only_staleness='MAX_STALENESS 12s'; +_set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'_; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
read_only_staleness='MAX_STALENESS_12s'; +set read_only_staleness='MIN_READ_TIMESTAMP_2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set read_only_staleness='MAX_STALENESS 12s'; +&set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'&; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS&12s'; +set read_only_staleness='MIN_READ_TIMESTAMP&2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set read_only_staleness='MAX_STALENESS 12s'; +$set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'$; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS$12s'; +set read_only_staleness='MIN_READ_TIMESTAMP$2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set read_only_staleness='MAX_STALENESS 12s'; +@set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'@; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS@12s'; +set read_only_staleness='MIN_READ_TIMESTAMP@2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set read_only_staleness='MAX_STALENESS 12s'; +!set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'!; +set 
read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS!12s'; +set read_only_staleness='MIN_READ_TIMESTAMP!2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set read_only_staleness='MAX_STALENESS 12s'; +*set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'*; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS*12s'; +set read_only_staleness='MIN_READ_TIMESTAMP*2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set read_only_staleness='MAX_STALENESS 12s'; +(set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'(; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS(12s'; +set read_only_staleness='MIN_READ_TIMESTAMP(2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set read_only_staleness='MAX_STALENESS 12s'; +)set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'); +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS)12s'; +set read_only_staleness='MIN_READ_TIMESTAMP)2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set read_only_staleness='MAX_STALENESS 12s'; +-set read_only_staleness='MIN_READ_TIMESTAMP 
2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'-; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS-12s'; +set read_only_staleness='MIN_READ_TIMESTAMP-2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set read_only_staleness='MAX_STALENESS 12s'; ++set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'+; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS+12s'; +set read_only_staleness='MIN_READ_TIMESTAMP+2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set read_only_staleness='MAX_STALENESS 12s'; +-#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'-#; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS-#12s'; +set read_only_staleness='MIN_READ_TIMESTAMP-#2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set read_only_staleness='MAX_STALENESS 12s'; +/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'/; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS/12s'; +set read_only_staleness='MIN_READ_TIMESTAMP/2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -\set read_only_staleness='MAX_STALENESS 12s'; +\set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'\; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS\12s'; +set read_only_staleness='MIN_READ_TIMESTAMP\2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set read_only_staleness='MAX_STALENESS 12s'; +?set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'?; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS?12s'; +set read_only_staleness='MIN_READ_TIMESTAMP?2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set read_only_staleness='MAX_STALENESS 12s'; +-/set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'-/; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS-/12s'; +set read_only_staleness='MIN_READ_TIMESTAMP-/2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set read_only_staleness='MAX_STALENESS 12s'; +/#set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'/#; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
read_only_staleness='MAX_STALENESS/#12s'; +set read_only_staleness='MIN_READ_TIMESTAMP/#2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set read_only_staleness='MAX_STALENESS 12s'; +/-set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 12s'/-; +set read_only_staleness='MIN_READ_TIMESTAMP 2018-01-02T03:04:05.123+07:45'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS/-12s'; +set read_only_staleness='MIN_READ_TIMESTAMP/-2018-01-02T03:04:05.123+07:45'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 100ms'; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; -SET READ_ONLY_STALENESS='MAX_STALENESS 100MS'; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; -set read_only_staleness='max_staleness 100ms'; +set read_only_staleness='read_timestamp 2018-01-02t03:04:05.54321-07:00'; NEW_CONNECTION; - set read_only_staleness='MAX_STALENESS 100ms'; + set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; - set read_only_staleness='MAX_STALENESS 100ms'; + set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 100ms'; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 100ms' ; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' ; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 100ms' ; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' ; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 100ms' +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' ; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 100ms'; +set 
read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 100ms'; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; set -read_only_staleness='MAX_STALENESS 100ms'; +read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set read_only_staleness='MAX_STALENESS 100ms'; +foo set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms' bar; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set read_only_staleness='MAX_STALENESS 100ms'; +%set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'%; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS%100ms'; +set read_only_staleness='READ_TIMESTAMP%2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set read_only_staleness='MAX_STALENESS 100ms'; +_set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'_; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS_100ms'; +set read_only_staleness='READ_TIMESTAMP_2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set read_only_staleness='MAX_STALENESS 100ms'; +&set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
read_only_staleness='MAX_STALENESS 100ms'&; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS&100ms'; +set read_only_staleness='READ_TIMESTAMP&2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set read_only_staleness='MAX_STALENESS 100ms'; +$set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'$; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS$100ms'; +set read_only_staleness='READ_TIMESTAMP$2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set read_only_staleness='MAX_STALENESS 100ms'; +@set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'@; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS@100ms'; +set read_only_staleness='READ_TIMESTAMP@2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set read_only_staleness='MAX_STALENESS 100ms'; +!set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'!; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS!100ms'; +set read_only_staleness='READ_TIMESTAMP!2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set read_only_staleness='MAX_STALENESS 100ms'; +*set 
read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'*; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS*100ms'; +set read_only_staleness='READ_TIMESTAMP*2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set read_only_staleness='MAX_STALENESS 100ms'; +(set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'(; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS(100ms'; +set read_only_staleness='READ_TIMESTAMP(2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set read_only_staleness='MAX_STALENESS 100ms'; +)set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'); +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS)100ms'; +set read_only_staleness='READ_TIMESTAMP)2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set read_only_staleness='MAX_STALENESS 100ms'; +-set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'-; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS-100ms'; +set read_only_staleness='READ_TIMESTAMP-2018-01-02T03:04:05.54321-07:00'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set read_only_staleness='MAX_STALENESS 100ms'; ++set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'+; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS+100ms'; +set read_only_staleness='READ_TIMESTAMP+2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set read_only_staleness='MAX_STALENESS 100ms'; +-#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'-#; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS-#100ms'; +set read_only_staleness='READ_TIMESTAMP-#2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set read_only_staleness='MAX_STALENESS 100ms'; +/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'/; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS/100ms'; +set read_only_staleness='READ_TIMESTAMP/2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set read_only_staleness='MAX_STALENESS 100ms'; +\set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'\; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
read_only_staleness='MAX_STALENESS\100ms'; +set read_only_staleness='READ_TIMESTAMP\2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set read_only_staleness='MAX_STALENESS 100ms'; +?set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'?; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS?100ms'; +set read_only_staleness='READ_TIMESTAMP?2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set read_only_staleness='MAX_STALENESS 100ms'; +-/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'-/; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS-/100ms'; +set read_only_staleness='READ_TIMESTAMP-/2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set read_only_staleness='MAX_STALENESS 100ms'; +/#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'/#; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS/#100ms'; +set read_only_staleness='READ_TIMESTAMP/#2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set read_only_staleness='MAX_STALENESS 100ms'; +/-set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 100ms'/-; +set 
read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321-07:00'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS/-100ms'; +set read_only_staleness='READ_TIMESTAMP/-2018-01-02T03:04:05.54321-07:00'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 99999us'; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; -SET READ_ONLY_STALENESS='MAX_STALENESS 99999US'; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; -set read_only_staleness='max_staleness 99999us'; +set read_only_staleness='read_timestamp 2018-01-02t03:04:05.54321z'; NEW_CONNECTION; - set read_only_staleness='MAX_STALENESS 99999us'; + set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; - set read_only_staleness='MAX_STALENESS 99999us'; + set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 99999us'; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 99999us' ; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' ; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 99999us' ; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' ; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 99999us' +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' ; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 99999us'; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 99999us'; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; set -read_only_staleness='MAX_STALENESS 99999us'; +read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set read_only_staleness='MAX_STALENESS 99999us'; +foo set 
read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us' bar; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set read_only_staleness='MAX_STALENESS 99999us'; +%set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'%; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS%99999us'; +set read_only_staleness='READ_TIMESTAMP%2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set read_only_staleness='MAX_STALENESS 99999us'; +_set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'_; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS_99999us'; +set read_only_staleness='READ_TIMESTAMP_2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set read_only_staleness='MAX_STALENESS 99999us'; +&set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'&; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS&99999us'; +set read_only_staleness='READ_TIMESTAMP&2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set read_only_staleness='MAX_STALENESS 99999us'; +$set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'$; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS$99999us'; +set read_only_staleness='READ_TIMESTAMP$2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set read_only_staleness='MAX_STALENESS 99999us'; +@set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'@; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS@99999us'; +set read_only_staleness='READ_TIMESTAMP@2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set read_only_staleness='MAX_STALENESS 99999us'; +!set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'!; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS!99999us'; +set read_only_staleness='READ_TIMESTAMP!2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set read_only_staleness='MAX_STALENESS 99999us'; +*set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'*; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS*99999us'; +set read_only_staleness='READ_TIMESTAMP*2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set read_only_staleness='MAX_STALENESS 99999us'; +(set 
read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'(; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS(99999us'; +set read_only_staleness='READ_TIMESTAMP(2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set read_only_staleness='MAX_STALENESS 99999us'; +)set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'); +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS)99999us'; +set read_only_staleness='READ_TIMESTAMP)2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set read_only_staleness='MAX_STALENESS 99999us'; +-set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'-; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS-99999us'; +set read_only_staleness='READ_TIMESTAMP-2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set read_only_staleness='MAX_STALENESS 99999us'; ++set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'+; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS+99999us'; +set read_only_staleness='READ_TIMESTAMP+2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT --#set read_only_staleness='MAX_STALENESS 99999us'; +-#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'-#; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS-#99999us'; +set read_only_staleness='READ_TIMESTAMP-#2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set read_only_staleness='MAX_STALENESS 99999us'; +/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'/; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS/99999us'; +set read_only_staleness='READ_TIMESTAMP/2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set read_only_staleness='MAX_STALENESS 99999us'; +\set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'\; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS\99999us'; +set read_only_staleness='READ_TIMESTAMP\2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set read_only_staleness='MAX_STALENESS 99999us'; +?set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'?; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS?99999us'; +set 
read_only_staleness='READ_TIMESTAMP?2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set read_only_staleness='MAX_STALENESS 99999us'; +-/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'-/; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS-/99999us'; +set read_only_staleness='READ_TIMESTAMP-/2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set read_only_staleness='MAX_STALENESS 99999us'; +/#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'/#; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS/#99999us'; +set read_only_staleness='READ_TIMESTAMP/#2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set read_only_staleness='MAX_STALENESS 99999us'; +/-set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 99999us'/-; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321Z'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS/-99999us'; +set read_only_staleness='READ_TIMESTAMP/-2018-01-02T03:04:05.54321Z'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 10ns'; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; -SET READ_ONLY_STALENESS='MAX_STALENESS 10NS'; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; -set read_only_staleness='max_staleness 10ns'; +set 
read_only_staleness='read_timestamp 2018-01-02t03:04:05.54321+05:30'; NEW_CONNECTION; - set read_only_staleness='MAX_STALENESS 10ns'; + set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; - set read_only_staleness='MAX_STALENESS 10ns'; + set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 10ns'; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 10ns' ; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' ; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 10ns' ; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' ; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 10ns' +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' ; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 10ns'; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; -set read_only_staleness='MAX_STALENESS 10ns'; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; set -read_only_staleness='MAX_STALENESS 10ns'; +read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set read_only_staleness='MAX_STALENESS 10ns'; +foo set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns' bar; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set read_only_staleness='MAX_STALENESS 10ns'; +%set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'%; +set read_only_staleness='READ_TIMESTAMP 
2018-01-02T03:04:05.54321+05:30'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS%10ns'; +set read_only_staleness='READ_TIMESTAMP%2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set read_only_staleness='MAX_STALENESS 10ns'; +_set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'_; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS_10ns'; +set read_only_staleness='READ_TIMESTAMP_2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set read_only_staleness='MAX_STALENESS 10ns'; +&set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'&; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS&10ns'; +set read_only_staleness='READ_TIMESTAMP&2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set read_only_staleness='MAX_STALENESS 10ns'; +$set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'$; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS$10ns'; +set read_only_staleness='READ_TIMESTAMP$2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set read_only_staleness='MAX_STALENESS 10ns'; +@set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'@; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS@10ns'; +set read_only_staleness='READ_TIMESTAMP@2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set read_only_staleness='MAX_STALENESS 10ns'; +!set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'!; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS!10ns'; +set read_only_staleness='READ_TIMESTAMP!2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set read_only_staleness='MAX_STALENESS 10ns'; +*set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'*; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS*10ns'; +set read_only_staleness='READ_TIMESTAMP*2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set read_only_staleness='MAX_STALENESS 10ns'; +(set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'(; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS(10ns'; +set read_only_staleness='READ_TIMESTAMP(2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set read_only_staleness='MAX_STALENESS 10ns'; +)set 
read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'); +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS)10ns'; +set read_only_staleness='READ_TIMESTAMP)2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set read_only_staleness='MAX_STALENESS 10ns'; +-set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'-; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS-10ns'; +set read_only_staleness='READ_TIMESTAMP-2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set read_only_staleness='MAX_STALENESS 10ns'; ++set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'+; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS+10ns'; +set read_only_staleness='READ_TIMESTAMP+2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set read_only_staleness='MAX_STALENESS 10ns'; +-#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'-#; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS-#10ns'; +set read_only_staleness='READ_TIMESTAMP-#2018-01-02T03:04:05.54321+05:30'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set read_only_staleness='MAX_STALENESS 10ns'; +/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'/; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS/10ns'; +set read_only_staleness='READ_TIMESTAMP/2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set read_only_staleness='MAX_STALENESS 10ns'; +\set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'\; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS\10ns'; +set read_only_staleness='READ_TIMESTAMP\2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set read_only_staleness='MAX_STALENESS 10ns'; +?set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'?; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS?10ns'; +set read_only_staleness='READ_TIMESTAMP?2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set read_only_staleness='MAX_STALENESS 10ns'; +-/set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'-/; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
read_only_staleness='MAX_STALENESS-/10ns'; +set read_only_staleness='READ_TIMESTAMP-/2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set read_only_staleness='MAX_STALENESS 10ns'; +/#set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'/#; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS/#10ns'; +set read_only_staleness='READ_TIMESTAMP/#2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set read_only_staleness='MAX_STALENESS 10ns'; +/-set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS 10ns'/-; +set read_only_staleness='READ_TIMESTAMP 2018-01-02T03:04:05.54321+05:30'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='MAX_STALENESS/-10ns'; +set read_only_staleness='READ_TIMESTAMP/-2018-01-02T03:04:05.54321+05:30'; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 15s'; +set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; -SET READ_ONLY_STALENESS='EXACT_STALENESS 15S'; +SET READ_ONLY_STALENESS='MAX_STALENESS 12S'; NEW_CONNECTION; -set read_only_staleness='exact_staleness 15s'; +set read_only_staleness='max_staleness 12s'; NEW_CONNECTION; - set read_only_staleness='EXACT_STALENESS 15s'; + set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; - set read_only_staleness='EXACT_STALENESS 15s'; + set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 15s'; +set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 15s' ; +set read_only_staleness='MAX_STALENESS 12s' ; NEW_CONNECTION; -set 
read_only_staleness='EXACT_STALENESS 15s' ; +set read_only_staleness='MAX_STALENESS 12s' ; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 15s' +set read_only_staleness='MAX_STALENESS 12s' ; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 15s'; +set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 15s'; +set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; set -read_only_staleness='EXACT_STALENESS 15s'; +read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set read_only_staleness='EXACT_STALENESS 15s'; +foo set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s' bar; +set read_only_staleness='MAX_STALENESS 12s' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set read_only_staleness='EXACT_STALENESS 15s'; +%set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'%; +set read_only_staleness='MAX_STALENESS 12s'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS%15s'; +set read_only_staleness='MAX_STALENESS%12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set read_only_staleness='EXACT_STALENESS 15s'; +_set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'_; +set read_only_staleness='MAX_STALENESS 12s'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS_15s'; +set read_only_staleness='MAX_STALENESS_12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set read_only_staleness='EXACT_STALENESS 15s'; +&set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'&; +set read_only_staleness='MAX_STALENESS 
12s'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS&15s'; +set read_only_staleness='MAX_STALENESS&12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set read_only_staleness='EXACT_STALENESS 15s'; +$set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'$; +set read_only_staleness='MAX_STALENESS 12s'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS$15s'; +set read_only_staleness='MAX_STALENESS$12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set read_only_staleness='EXACT_STALENESS 15s'; +@set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'@; +set read_only_staleness='MAX_STALENESS 12s'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS@15s'; +set read_only_staleness='MAX_STALENESS@12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set read_only_staleness='EXACT_STALENESS 15s'; +!set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'!; +set read_only_staleness='MAX_STALENESS 12s'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS!15s'; +set read_only_staleness='MAX_STALENESS!12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set read_only_staleness='EXACT_STALENESS 15s'; +*set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'*; +set read_only_staleness='MAX_STALENESS 12s'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS*15s'; +set read_only_staleness='MAX_STALENESS*12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set 
read_only_staleness='EXACT_STALENESS 15s'; +(set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'(; +set read_only_staleness='MAX_STALENESS 12s'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS(15s'; +set read_only_staleness='MAX_STALENESS(12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set read_only_staleness='EXACT_STALENESS 15s'; +)set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'); +set read_only_staleness='MAX_STALENESS 12s'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS)15s'; +set read_only_staleness='MAX_STALENESS)12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set read_only_staleness='EXACT_STALENESS 15s'; +-set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'-; +set read_only_staleness='MAX_STALENESS 12s'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS-15s'; +set read_only_staleness='MAX_STALENESS-12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set read_only_staleness='EXACT_STALENESS 15s'; ++set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'+; +set read_only_staleness='MAX_STALENESS 12s'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS+15s'; +set read_only_staleness='MAX_STALENESS+12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set read_only_staleness='EXACT_STALENESS 15s'; +-#set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'-#; +set read_only_staleness='MAX_STALENESS 12s'-#; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS-#15s'; +set read_only_staleness='MAX_STALENESS-#12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set read_only_staleness='EXACT_STALENESS 15s'; +/set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'/; +set read_only_staleness='MAX_STALENESS 12s'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS/15s'; +set read_only_staleness='MAX_STALENESS/12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set read_only_staleness='EXACT_STALENESS 15s'; +\set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'\; +set read_only_staleness='MAX_STALENESS 12s'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS\15s'; +set read_only_staleness='MAX_STALENESS\12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set read_only_staleness='EXACT_STALENESS 15s'; +?set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'?; +set read_only_staleness='MAX_STALENESS 12s'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS?15s'; +set read_only_staleness='MAX_STALENESS?12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set read_only_staleness='EXACT_STALENESS 15s'; +-/set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'-/; +set read_only_staleness='MAX_STALENESS 12s'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS-/15s'; +set read_only_staleness='MAX_STALENESS-/12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set 
read_only_staleness='EXACT_STALENESS 15s'; +/#set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'/#; +set read_only_staleness='MAX_STALENESS 12s'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS/#15s'; +set read_only_staleness='MAX_STALENESS/#12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set read_only_staleness='EXACT_STALENESS 15s'; +/-set read_only_staleness='MAX_STALENESS 12s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15s'/-; +set read_only_staleness='MAX_STALENESS 12s'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS/-15s'; +set read_only_staleness='MAX_STALENESS/-12s'; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 1500ms'; +set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; -SET READ_ONLY_STALENESS='EXACT_STALENESS 1500MS'; +SET READ_ONLY_STALENESS='MAX_STALENESS 100MS'; NEW_CONNECTION; -set read_only_staleness='exact_staleness 1500ms'; +set read_only_staleness='max_staleness 100ms'; NEW_CONNECTION; - set read_only_staleness='EXACT_STALENESS 1500ms'; + set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; - set read_only_staleness='EXACT_STALENESS 1500ms'; + set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 1500ms'; +set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 1500ms' ; +set read_only_staleness='MAX_STALENESS 100ms' ; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 1500ms' ; +set read_only_staleness='MAX_STALENESS 100ms' ; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 1500ms' +set read_only_staleness='MAX_STALENESS 100ms' ; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 1500ms'; +set read_only_staleness='MAX_STALENESS 100ms'; 
NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 1500ms'; +set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; set -read_only_staleness='EXACT_STALENESS 1500ms'; +read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set read_only_staleness='EXACT_STALENESS 1500ms'; +foo set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms' bar; +set read_only_staleness='MAX_STALENESS 100ms' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set read_only_staleness='EXACT_STALENESS 1500ms'; +%set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'%; +set read_only_staleness='MAX_STALENESS 100ms'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS%1500ms'; +set read_only_staleness='MAX_STALENESS%100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set read_only_staleness='EXACT_STALENESS 1500ms'; +_set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'_; +set read_only_staleness='MAX_STALENESS 100ms'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS_1500ms'; +set read_only_staleness='MAX_STALENESS_100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set read_only_staleness='EXACT_STALENESS 1500ms'; +&set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'&; +set read_only_staleness='MAX_STALENESS 100ms'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS&1500ms'; +set read_only_staleness='MAX_STALENESS&100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set read_only_staleness='EXACT_STALENESS 
1500ms'; +$set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'$; +set read_only_staleness='MAX_STALENESS 100ms'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS$1500ms'; +set read_only_staleness='MAX_STALENESS$100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set read_only_staleness='EXACT_STALENESS 1500ms'; +@set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'@; +set read_only_staleness='MAX_STALENESS 100ms'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS@1500ms'; +set read_only_staleness='MAX_STALENESS@100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set read_only_staleness='EXACT_STALENESS 1500ms'; +!set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'!; +set read_only_staleness='MAX_STALENESS 100ms'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS!1500ms'; +set read_only_staleness='MAX_STALENESS!100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set read_only_staleness='EXACT_STALENESS 1500ms'; +*set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'*; +set read_only_staleness='MAX_STALENESS 100ms'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS*1500ms'; +set read_only_staleness='MAX_STALENESS*100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set read_only_staleness='EXACT_STALENESS 1500ms'; +(set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'(; +set 
read_only_staleness='MAX_STALENESS 100ms'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS(1500ms'; +set read_only_staleness='MAX_STALENESS(100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set read_only_staleness='EXACT_STALENESS 1500ms'; +)set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'); +set read_only_staleness='MAX_STALENESS 100ms'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS)1500ms'; +set read_only_staleness='MAX_STALENESS)100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set read_only_staleness='EXACT_STALENESS 1500ms'; +-set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'-; +set read_only_staleness='MAX_STALENESS 100ms'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS-1500ms'; +set read_only_staleness='MAX_STALENESS-100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set read_only_staleness='EXACT_STALENESS 1500ms'; ++set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'+; +set read_only_staleness='MAX_STALENESS 100ms'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS+1500ms'; +set read_only_staleness='MAX_STALENESS+100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set read_only_staleness='EXACT_STALENESS 1500ms'; +-#set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'-#; +set read_only_staleness='MAX_STALENESS 100ms'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS-#1500ms'; +set 
read_only_staleness='MAX_STALENESS-#100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set read_only_staleness='EXACT_STALENESS 1500ms'; +/set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'/; +set read_only_staleness='MAX_STALENESS 100ms'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS/1500ms'; +set read_only_staleness='MAX_STALENESS/100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set read_only_staleness='EXACT_STALENESS 1500ms'; +\set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'\; +set read_only_staleness='MAX_STALENESS 100ms'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS\1500ms'; +set read_only_staleness='MAX_STALENESS\100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set read_only_staleness='EXACT_STALENESS 1500ms'; +?set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'?; +set read_only_staleness='MAX_STALENESS 100ms'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS?1500ms'; +set read_only_staleness='MAX_STALENESS?100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set read_only_staleness='EXACT_STALENESS 1500ms'; +-/set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'-/; +set read_only_staleness='MAX_STALENESS 100ms'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS-/1500ms'; +set read_only_staleness='MAX_STALENESS-/100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set read_only_staleness='EXACT_STALENESS 1500ms'; +/#set 
read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'/#; +set read_only_staleness='MAX_STALENESS 100ms'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS/#1500ms'; +set read_only_staleness='MAX_STALENESS/#100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set read_only_staleness='EXACT_STALENESS 1500ms'; +/-set read_only_staleness='MAX_STALENESS 100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 1500ms'/-; +set read_only_staleness='MAX_STALENESS 100ms'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS/-1500ms'; +set read_only_staleness='MAX_STALENESS/-100ms'; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 15000000us'; +set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; -SET READ_ONLY_STALENESS='EXACT_STALENESS 15000000US'; +SET READ_ONLY_STALENESS='MAX_STALENESS 99999US'; NEW_CONNECTION; -set read_only_staleness='exact_staleness 15000000us'; +set read_only_staleness='max_staleness 99999us'; NEW_CONNECTION; - set read_only_staleness='EXACT_STALENESS 15000000us'; + set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; - set read_only_staleness='EXACT_STALENESS 15000000us'; + set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 15000000us'; +set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 15000000us' ; +set read_only_staleness='MAX_STALENESS 99999us' ; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 15000000us' ; +set read_only_staleness='MAX_STALENESS 99999us' ; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 15000000us' +set read_only_staleness='MAX_STALENESS 99999us' ; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 15000000us'; +set 
read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 15000000us'; +set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; set -read_only_staleness='EXACT_STALENESS 15000000us'; +read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set read_only_staleness='EXACT_STALENESS 15000000us'; +foo set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us' bar; +set read_only_staleness='MAX_STALENESS 99999us' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set read_only_staleness='EXACT_STALENESS 15000000us'; +%set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'%; +set read_only_staleness='MAX_STALENESS 99999us'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS%15000000us'; +set read_only_staleness='MAX_STALENESS%99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set read_only_staleness='EXACT_STALENESS 15000000us'; +_set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'_; +set read_only_staleness='MAX_STALENESS 99999us'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS_15000000us'; +set read_only_staleness='MAX_STALENESS_99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set read_only_staleness='EXACT_STALENESS 15000000us'; +&set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'&; +set read_only_staleness='MAX_STALENESS 99999us'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS&15000000us'; +set 
read_only_staleness='MAX_STALENESS&99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set read_only_staleness='EXACT_STALENESS 15000000us'; +$set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'$; +set read_only_staleness='MAX_STALENESS 99999us'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS$15000000us'; +set read_only_staleness='MAX_STALENESS$99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set read_only_staleness='EXACT_STALENESS 15000000us'; +@set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'@; +set read_only_staleness='MAX_STALENESS 99999us'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS@15000000us'; +set read_only_staleness='MAX_STALENESS@99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set read_only_staleness='EXACT_STALENESS 15000000us'; +!set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'!; +set read_only_staleness='MAX_STALENESS 99999us'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS!15000000us'; +set read_only_staleness='MAX_STALENESS!99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set read_only_staleness='EXACT_STALENESS 15000000us'; +*set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'*; +set read_only_staleness='MAX_STALENESS 99999us'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS*15000000us'; +set read_only_staleness='MAX_STALENESS*99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set 
read_only_staleness='EXACT_STALENESS 15000000us'; +(set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'(; +set read_only_staleness='MAX_STALENESS 99999us'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS(15000000us'; +set read_only_staleness='MAX_STALENESS(99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set read_only_staleness='EXACT_STALENESS 15000000us'; +)set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'); +set read_only_staleness='MAX_STALENESS 99999us'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS)15000000us'; +set read_only_staleness='MAX_STALENESS)99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set read_only_staleness='EXACT_STALENESS 15000000us'; +-set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'-; +set read_only_staleness='MAX_STALENESS 99999us'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS-15000000us'; +set read_only_staleness='MAX_STALENESS-99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set read_only_staleness='EXACT_STALENESS 15000000us'; ++set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'+; +set read_only_staleness='MAX_STALENESS 99999us'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS+15000000us'; +set read_only_staleness='MAX_STALENESS+99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set read_only_staleness='EXACT_STALENESS 15000000us'; +-#set read_only_staleness='MAX_STALENESS 99999us'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'-#; +set read_only_staleness='MAX_STALENESS 99999us'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS-#15000000us'; +set read_only_staleness='MAX_STALENESS-#99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set read_only_staleness='EXACT_STALENESS 15000000us'; +/set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'/; +set read_only_staleness='MAX_STALENESS 99999us'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS/15000000us'; +set read_only_staleness='MAX_STALENESS/99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set read_only_staleness='EXACT_STALENESS 15000000us'; +\set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'\; +set read_only_staleness='MAX_STALENESS 99999us'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS\15000000us'; +set read_only_staleness='MAX_STALENESS\99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set read_only_staleness='EXACT_STALENESS 15000000us'; +?set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'?; +set read_only_staleness='MAX_STALENESS 99999us'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS?15000000us'; +set read_only_staleness='MAX_STALENESS?99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set read_only_staleness='EXACT_STALENESS 15000000us'; +-/set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 
15000000us'-/; +set read_only_staleness='MAX_STALENESS 99999us'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS-/15000000us'; +set read_only_staleness='MAX_STALENESS-/99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set read_only_staleness='EXACT_STALENESS 15000000us'; +/#set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'/#; +set read_only_staleness='MAX_STALENESS 99999us'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS/#15000000us'; +set read_only_staleness='MAX_STALENESS/#99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set read_only_staleness='EXACT_STALENESS 15000000us'; +/-set read_only_staleness='MAX_STALENESS 99999us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 15000000us'/-; +set read_only_staleness='MAX_STALENESS 99999us'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS/-15000000us'; +set read_only_staleness='MAX_STALENESS/-99999us'; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 9999ns'; +set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; -SET READ_ONLY_STALENESS='EXACT_STALENESS 9999NS'; +SET READ_ONLY_STALENESS='MAX_STALENESS 10NS'; NEW_CONNECTION; -set read_only_staleness='exact_staleness 9999ns'; +set read_only_staleness='max_staleness 10ns'; NEW_CONNECTION; - set read_only_staleness='EXACT_STALENESS 9999ns'; + set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; - set read_only_staleness='EXACT_STALENESS 9999ns'; + set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 9999ns'; +set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 9999ns' ; +set read_only_staleness='MAX_STALENESS 10ns' ; 
NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 9999ns' ; +set read_only_staleness='MAX_STALENESS 10ns' ; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 9999ns' +set read_only_staleness='MAX_STALENESS 10ns' ; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 9999ns'; +set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; -set read_only_staleness='EXACT_STALENESS 9999ns'; +set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; set -read_only_staleness='EXACT_STALENESS 9999ns'; +read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set read_only_staleness='EXACT_STALENESS 9999ns'; +foo set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns' bar; +set read_only_staleness='MAX_STALENESS 10ns' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set read_only_staleness='EXACT_STALENESS 9999ns'; +%set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'%; +set read_only_staleness='MAX_STALENESS 10ns'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS%9999ns'; +set read_only_staleness='MAX_STALENESS%10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set read_only_staleness='EXACT_STALENESS 9999ns'; +_set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'_; +set read_only_staleness='MAX_STALENESS 10ns'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS_9999ns'; +set read_only_staleness='MAX_STALENESS_10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set read_only_staleness='EXACT_STALENESS 9999ns'; +&set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
read_only_staleness='EXACT_STALENESS 9999ns'&; +set read_only_staleness='MAX_STALENESS 10ns'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS&9999ns'; +set read_only_staleness='MAX_STALENESS&10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set read_only_staleness='EXACT_STALENESS 9999ns'; +$set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'$; +set read_only_staleness='MAX_STALENESS 10ns'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS$9999ns'; +set read_only_staleness='MAX_STALENESS$10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set read_only_staleness='EXACT_STALENESS 9999ns'; +@set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'@; +set read_only_staleness='MAX_STALENESS 10ns'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS@9999ns'; +set read_only_staleness='MAX_STALENESS@10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set read_only_staleness='EXACT_STALENESS 9999ns'; +!set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'!; +set read_only_staleness='MAX_STALENESS 10ns'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS!9999ns'; +set read_only_staleness='MAX_STALENESS!10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set read_only_staleness='EXACT_STALENESS 9999ns'; +*set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'*; +set read_only_staleness='MAX_STALENESS 10ns'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
read_only_staleness='EXACT_STALENESS*9999ns'; +set read_only_staleness='MAX_STALENESS*10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set read_only_staleness='EXACT_STALENESS 9999ns'; +(set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'(; +set read_only_staleness='MAX_STALENESS 10ns'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS(9999ns'; +set read_only_staleness='MAX_STALENESS(10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set read_only_staleness='EXACT_STALENESS 9999ns'; +)set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'); +set read_only_staleness='MAX_STALENESS 10ns'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS)9999ns'; +set read_only_staleness='MAX_STALENESS)10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set read_only_staleness='EXACT_STALENESS 9999ns'; +-set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'-; +set read_only_staleness='MAX_STALENESS 10ns'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS-9999ns'; +set read_only_staleness='MAX_STALENESS-10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set read_only_staleness='EXACT_STALENESS 9999ns'; ++set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'+; +set read_only_staleness='MAX_STALENESS 10ns'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS+9999ns'; +set read_only_staleness='MAX_STALENESS+10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set read_only_staleness='EXACT_STALENESS 
9999ns'; +-#set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'-#; +set read_only_staleness='MAX_STALENESS 10ns'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS-#9999ns'; +set read_only_staleness='MAX_STALENESS-#10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set read_only_staleness='EXACT_STALENESS 9999ns'; +/set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'/; +set read_only_staleness='MAX_STALENESS 10ns'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS/9999ns'; +set read_only_staleness='MAX_STALENESS/10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set read_only_staleness='EXACT_STALENESS 9999ns'; +\set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'\; +set read_only_staleness='MAX_STALENESS 10ns'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS\9999ns'; +set read_only_staleness='MAX_STALENESS\10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set read_only_staleness='EXACT_STALENESS 9999ns'; +?set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'?; +set read_only_staleness='MAX_STALENESS 10ns'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS?9999ns'; +set read_only_staleness='MAX_STALENESS?10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set read_only_staleness='EXACT_STALENESS 9999ns'; +-/set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'-/; +set 
read_only_staleness='MAX_STALENESS 10ns'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS-/9999ns'; +set read_only_staleness='MAX_STALENESS-/10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set read_only_staleness='EXACT_STALENESS 9999ns'; +/#set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'/#; +set read_only_staleness='MAX_STALENESS 10ns'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS/#9999ns'; +set read_only_staleness='MAX_STALENESS/#10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set read_only_staleness='EXACT_STALENESS 9999ns'; +/-set read_only_staleness='MAX_STALENESS 10ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS 9999ns'/-; +set read_only_staleness='MAX_STALENESS 10ns'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set read_only_staleness='EXACT_STALENESS/-9999ns'; +set read_only_staleness='MAX_STALENESS/-10ns'; NEW_CONNECTION; -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; - set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +SET READ_ONLY_STALENESS='EXACT_STALENESS 15S'; NEW_CONNECTION; - set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='exact_staleness 15s'; +NEW_CONNECTION; + set read_only_staleness='EXACT_STALENESS 15s'; +NEW_CONNECTION; + set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; -set 
directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' ; +set read_only_staleness='EXACT_STALENESS 15s' ; NEW_CONNECTION; -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' ; +set read_only_staleness='EXACT_STALENESS 15s' ; NEW_CONNECTION; -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' +set read_only_staleness='EXACT_STALENESS 15s' ; NEW_CONNECTION; -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; set -directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +foo set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' bar; +set read_only_staleness='EXACT_STALENESS 15s' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +%set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'%; +set read_only_staleness='EXACT_STALENESS 15s'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set%directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS%15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +_set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'_; +set read_only_staleness='EXACT_STALENESS 15s'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS_15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +&set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'&; +set read_only_staleness='EXACT_STALENESS 15s'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS&15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +$set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'$; +set read_only_staleness='EXACT_STALENESS 15s'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set 
read_only_staleness='EXACT_STALENESS$15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +@set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'@; +set read_only_staleness='EXACT_STALENESS 15s'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS@15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +!set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'!; +set read_only_staleness='EXACT_STALENESS 15s'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS!15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +*set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'*; +set read_only_staleness='EXACT_STALENESS 15s'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS*15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set 
directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +(set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'(; +set read_only_staleness='EXACT_STALENESS 15s'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS(15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +)set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'); +set read_only_staleness='EXACT_STALENESS 15s'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS)15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +-set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'-; +set read_only_staleness='EXACT_STALENESS 15s'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS-15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; ++set 
read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'+; +set read_only_staleness='EXACT_STALENESS 15s'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS+15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +-#set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'-#; +set read_only_staleness='EXACT_STALENESS 15s'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS-#15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +/set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'/; +set read_only_staleness='EXACT_STALENESS 15s'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS/15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +\set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'\; +set read_only_staleness='EXACT_STALENESS 15s'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS\15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +?set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'?; +set read_only_staleness='EXACT_STALENESS 15s'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS?15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +-/set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'-/; +set read_only_staleness='EXACT_STALENESS 15s'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS-/15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +/#set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'/#; 
+set read_only_staleness='EXACT_STALENESS 15s'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS/#15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +/-set read_only_staleness='EXACT_STALENESS 15s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'/-; +set read_only_staleness='EXACT_STALENESS 15s'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; +set read_only_staleness='EXACT_STALENESS/-15s'; NEW_CONNECTION; -set directed_read=''; +set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; - set directed_read=''; +SET READ_ONLY_STALENESS='EXACT_STALENESS 1500MS'; NEW_CONNECTION; - set directed_read=''; +set read_only_staleness='exact_staleness 1500ms'; +NEW_CONNECTION; + set read_only_staleness='EXACT_STALENESS 1500ms'; +NEW_CONNECTION; + set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; -set directed_read=''; +set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; -set directed_read='' ; +set read_only_staleness='EXACT_STALENESS 1500ms' ; NEW_CONNECTION; -set directed_read='' ; +set read_only_staleness='EXACT_STALENESS 1500ms' ; NEW_CONNECTION; -set directed_read='' +set read_only_staleness='EXACT_STALENESS 1500ms' ; NEW_CONNECTION; -set directed_read=''; +set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; -set directed_read=''; +set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; set -directed_read=''; +read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set 
directed_read=''; +foo set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read='' bar; +set read_only_staleness='EXACT_STALENESS 1500ms' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set directed_read=''; +%set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''%; +set read_only_staleness='EXACT_STALENESS 1500ms'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%directed_read=''; +set read_only_staleness='EXACT_STALENESS%1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set directed_read=''; +_set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''_; +set read_only_staleness='EXACT_STALENESS 1500ms'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_directed_read=''; +set read_only_staleness='EXACT_STALENESS_1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set directed_read=''; +&set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''&; +set read_only_staleness='EXACT_STALENESS 1500ms'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&directed_read=''; +set read_only_staleness='EXACT_STALENESS&1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set directed_read=''; +$set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''$; +set read_only_staleness='EXACT_STALENESS 1500ms'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$directed_read=''; +set read_only_staleness='EXACT_STALENESS$1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set directed_read=''; +@set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''@; +set read_only_staleness='EXACT_STALENESS 1500ms'@; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set@directed_read=''; +set read_only_staleness='EXACT_STALENESS@1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set directed_read=''; +!set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''!; +set read_only_staleness='EXACT_STALENESS 1500ms'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!directed_read=''; +set read_only_staleness='EXACT_STALENESS!1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set directed_read=''; +*set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''*; +set read_only_staleness='EXACT_STALENESS 1500ms'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*directed_read=''; +set read_only_staleness='EXACT_STALENESS*1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set directed_read=''; +(set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''(; +set read_only_staleness='EXACT_STALENESS 1500ms'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(directed_read=''; +set read_only_staleness='EXACT_STALENESS(1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set directed_read=''; +)set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''); +set read_only_staleness='EXACT_STALENESS 1500ms'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)directed_read=''; +set read_only_staleness='EXACT_STALENESS)1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set directed_read=''; +-set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''-; +set read_only_staleness='EXACT_STALENESS 1500ms'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-directed_read=''; +set read_only_staleness='EXACT_STALENESS-1500ms'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set directed_read=''; ++set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''+; +set read_only_staleness='EXACT_STALENESS 1500ms'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+directed_read=''; +set read_only_staleness='EXACT_STALENESS+1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set directed_read=''; +-#set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''-#; +set read_only_staleness='EXACT_STALENESS 1500ms'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#directed_read=''; +set read_only_staleness='EXACT_STALENESS-#1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set directed_read=''; +/set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''/; +set read_only_staleness='EXACT_STALENESS 1500ms'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/directed_read=''; +set read_only_staleness='EXACT_STALENESS/1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set directed_read=''; +\set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''\; +set read_only_staleness='EXACT_STALENESS 1500ms'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\directed_read=''; +set read_only_staleness='EXACT_STALENESS\1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set directed_read=''; +?set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''?; +set read_only_staleness='EXACT_STALENESS 1500ms'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?directed_read=''; +set read_only_staleness='EXACT_STALENESS?1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set directed_read=''; +-/set 
read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''-/; +set read_only_staleness='EXACT_STALENESS 1500ms'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/directed_read=''; +set read_only_staleness='EXACT_STALENESS-/1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set directed_read=''; +/#set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''/#; +set read_only_staleness='EXACT_STALENESS 1500ms'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#directed_read=''; +set read_only_staleness='EXACT_STALENESS/#1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set directed_read=''; +/-set read_only_staleness='EXACT_STALENESS 1500ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set directed_read=''/-; +set read_only_staleness='EXACT_STALENESS 1500ms'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-directed_read=''; +set read_only_staleness='EXACT_STALENESS/-1500ms'; NEW_CONNECTION; -set optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; -SET OPTIMIZER_VERSION='1'; +SET READ_ONLY_STALENESS='EXACT_STALENESS 15000000US'; NEW_CONNECTION; -set optimizer_version='1'; +set read_only_staleness='exact_staleness 15000000us'; NEW_CONNECTION; - set optimizer_version='1'; + set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; - set optimizer_version='1'; + set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; -set optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; -set optimizer_version='1' ; +set read_only_staleness='EXACT_STALENESS 15000000us' ; NEW_CONNECTION; -set optimizer_version='1' ; +set read_only_staleness='EXACT_STALENESS 15000000us' ; NEW_CONNECTION; -set optimizer_version='1' +set read_only_staleness='EXACT_STALENESS 15000000us' ; NEW_CONNECTION; -set 
optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; -set optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; set -optimizer_version='1'; +read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set optimizer_version='1'; +foo set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1' bar; +set read_only_staleness='EXACT_STALENESS 15000000us' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set optimizer_version='1'; +%set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'%; +set read_only_staleness='EXACT_STALENESS 15000000us'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS%15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set optimizer_version='1'; +_set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'_; +set read_only_staleness='EXACT_STALENESS 15000000us'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS_15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set optimizer_version='1'; +&set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'&; +set read_only_staleness='EXACT_STALENESS 15000000us'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS&15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set optimizer_version='1'; +$set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'$; +set 
read_only_staleness='EXACT_STALENESS 15000000us'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS$15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set optimizer_version='1'; +@set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'@; +set read_only_staleness='EXACT_STALENESS 15000000us'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS@15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set optimizer_version='1'; +!set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'!; +set read_only_staleness='EXACT_STALENESS 15000000us'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS!15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set optimizer_version='1'; +*set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'*; +set read_only_staleness='EXACT_STALENESS 15000000us'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS*15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set optimizer_version='1'; +(set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'(; +set read_only_staleness='EXACT_STALENESS 15000000us'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS(15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set optimizer_version='1'; +)set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set optimizer_version='1'); +set read_only_staleness='EXACT_STALENESS 15000000us'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS)15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set optimizer_version='1'; +-set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'-; +set read_only_staleness='EXACT_STALENESS 15000000us'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS-15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set optimizer_version='1'; ++set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'+; +set read_only_staleness='EXACT_STALENESS 15000000us'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS+15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set optimizer_version='1'; +-#set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'-#; +set read_only_staleness='EXACT_STALENESS 15000000us'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS-#15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set optimizer_version='1'; +/set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'/; +set read_only_staleness='EXACT_STALENESS 15000000us'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS/15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set optimizer_version='1'; +\set read_only_staleness='EXACT_STALENESS 15000000us'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'\; +set read_only_staleness='EXACT_STALENESS 15000000us'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS\15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set optimizer_version='1'; +?set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'?; +set read_only_staleness='EXACT_STALENESS 15000000us'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS?15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set optimizer_version='1'; +-/set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'-/; +set read_only_staleness='EXACT_STALENESS 15000000us'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS-/15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set optimizer_version='1'; +/#set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'/#; +set read_only_staleness='EXACT_STALENESS 15000000us'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS/#15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set optimizer_version='1'; +/-set read_only_staleness='EXACT_STALENESS 15000000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='1'/-; +set read_only_staleness='EXACT_STALENESS 15000000us'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-optimizer_version='1'; +set read_only_staleness='EXACT_STALENESS/-15000000us'; NEW_CONNECTION; -set optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS 
9999ns'; NEW_CONNECTION; -SET OPTIMIZER_VERSION='200'; +SET READ_ONLY_STALENESS='EXACT_STALENESS 9999NS'; NEW_CONNECTION; -set optimizer_version='200'; +set read_only_staleness='exact_staleness 9999ns'; NEW_CONNECTION; - set optimizer_version='200'; + set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; - set optimizer_version='200'; + set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; -set optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; -set optimizer_version='200' ; +set read_only_staleness='EXACT_STALENESS 9999ns' ; NEW_CONNECTION; -set optimizer_version='200' ; +set read_only_staleness='EXACT_STALENESS 9999ns' ; NEW_CONNECTION; -set optimizer_version='200' +set read_only_staleness='EXACT_STALENESS 9999ns' ; NEW_CONNECTION; -set optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; -set optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; set -optimizer_version='200'; +read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set optimizer_version='200'; +foo set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200' bar; +set read_only_staleness='EXACT_STALENESS 9999ns' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set optimizer_version='200'; +%set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'%; +set read_only_staleness='EXACT_STALENESS 9999ns'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS%9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set optimizer_version='200'; +_set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'_; +set 
read_only_staleness='EXACT_STALENESS 9999ns'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS_9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set optimizer_version='200'; +&set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'&; +set read_only_staleness='EXACT_STALENESS 9999ns'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS&9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set optimizer_version='200'; +$set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'$; +set read_only_staleness='EXACT_STALENESS 9999ns'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS$9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set optimizer_version='200'; +@set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'@; +set read_only_staleness='EXACT_STALENESS 9999ns'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS@9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set optimizer_version='200'; +!set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'!; +set read_only_staleness='EXACT_STALENESS 9999ns'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS!9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set optimizer_version='200'; +*set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'*; +set 
read_only_staleness='EXACT_STALENESS 9999ns'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS*9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set optimizer_version='200'; +(set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'(; +set read_only_staleness='EXACT_STALENESS 9999ns'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS(9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set optimizer_version='200'; +)set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'); +set read_only_staleness='EXACT_STALENESS 9999ns'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS)9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set optimizer_version='200'; +-set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'-; +set read_only_staleness='EXACT_STALENESS 9999ns'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS-9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set optimizer_version='200'; ++set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'+; +set read_only_staleness='EXACT_STALENESS 9999ns'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS+9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set optimizer_version='200'; +-#set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'-#; 
+set read_only_staleness='EXACT_STALENESS 9999ns'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS-#9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set optimizer_version='200'; +/set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'/; +set read_only_staleness='EXACT_STALENESS 9999ns'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS/9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set optimizer_version='200'; +\set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'\; +set read_only_staleness='EXACT_STALENESS 9999ns'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS\9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set optimizer_version='200'; +?set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'?; +set read_only_staleness='EXACT_STALENESS 9999ns'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS?9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set optimizer_version='200'; +-/set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'-/; +set read_only_staleness='EXACT_STALENESS 9999ns'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS-/9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set optimizer_version='200'; +/#set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
optimizer_version='200'/#; +set read_only_staleness='EXACT_STALENESS 9999ns'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#optimizer_version='200'; +set read_only_staleness='EXACT_STALENESS/#9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set optimizer_version='200'; +/-set read_only_staleness='EXACT_STALENESS 9999ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='200'/-; +set read_only_staleness='EXACT_STALENESS 9999ns'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-optimizer_version='200'; -NEW_CONNECTION; -set optimizer_version='LATEST'; -NEW_CONNECTION; -SET OPTIMIZER_VERSION='LATEST'; +set read_only_staleness='EXACT_STALENESS/-9999ns'; NEW_CONNECTION; -set optimizer_version='latest'; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; - set optimizer_version='LATEST'; + set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; - set optimizer_version='LATEST'; + set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; -set optimizer_version='LATEST'; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; -set optimizer_version='LATEST' ; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' ; NEW_CONNECTION; -set optimizer_version='LATEST' ; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' ; NEW_CONNECTION; -set optimizer_version='LATEST' +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' ; NEW_CONNECTION; -set optimizer_version='LATEST'; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; 
NEW_CONNECTION; -set optimizer_version='LATEST'; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; set -optimizer_version='LATEST'; +directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set optimizer_version='LATEST'; +foo set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST' bar; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set optimizer_version='LATEST'; +%set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'%; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%optimizer_version='LATEST'; +set%directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set optimizer_version='LATEST'; +_set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'_; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_optimizer_version='LATEST'; +set_directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set optimizer_version='LATEST'; +&set 
directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'&; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&optimizer_version='LATEST'; +set&directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set optimizer_version='LATEST'; +$set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'$; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$optimizer_version='LATEST'; +set$directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set optimizer_version='LATEST'; +@set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'@; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@optimizer_version='LATEST'; +set@directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set optimizer_version='LATEST'; +!set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'!; +set 
directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!optimizer_version='LATEST'; +set!directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set optimizer_version='LATEST'; +*set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'*; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*optimizer_version='LATEST'; +set*directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set optimizer_version='LATEST'; +(set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'(; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(optimizer_version='LATEST'; +set(directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set optimizer_version='LATEST'; +)set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'); +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)optimizer_version='LATEST'; 
+set)directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set optimizer_version='LATEST'; +-set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'-; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-optimizer_version='LATEST'; +set-directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set optimizer_version='LATEST'; ++set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'+; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+optimizer_version='LATEST'; +set+directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set optimizer_version='LATEST'; +-#set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'-#; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#optimizer_version='LATEST'; +set-#directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set optimizer_version='LATEST'; +/set 
directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'/; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/optimizer_version='LATEST'; +set/directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set optimizer_version='LATEST'; +\set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'\; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\optimizer_version='LATEST'; +set\directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set optimizer_version='LATEST'; +?set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'?; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?optimizer_version='LATEST'; +set?directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set optimizer_version='LATEST'; +-/set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'-/; +set 
directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/optimizer_version='LATEST'; +set-/directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set optimizer_version='LATEST'; +/#set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'/#; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#optimizer_version='LATEST'; +set/#directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set optimizer_version='LATEST'; +/-set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='LATEST'/-; +set directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-optimizer_version='LATEST'; -NEW_CONNECTION; -set optimizer_version=''; -NEW_CONNECTION; -SET OPTIMIZER_VERSION=''; +set/-directed_read='{"includeReplicas":{"replicaSelections":[{"location":"eu-west1","type":"READ_ONLY"}]}}'; NEW_CONNECTION; -set optimizer_version=''; +set directed_read=''; NEW_CONNECTION; - set optimizer_version=''; + set directed_read=''; NEW_CONNECTION; - set optimizer_version=''; + set directed_read=''; NEW_CONNECTION; -set optimizer_version=''; +set directed_read=''; NEW_CONNECTION; -set optimizer_version='' ; +set directed_read='' ; NEW_CONNECTION; -set optimizer_version='' ; +set directed_read='' ; NEW_CONNECTION; -set 
optimizer_version='' +set directed_read='' ; NEW_CONNECTION; -set optimizer_version=''; +set directed_read=''; NEW_CONNECTION; -set optimizer_version=''; +set directed_read=''; NEW_CONNECTION; set -optimizer_version=''; +directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set optimizer_version=''; +foo set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version='' bar; +set directed_read='' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set optimizer_version=''; +%set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''%; +set directed_read=''%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%optimizer_version=''; +set%directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set optimizer_version=''; +_set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''_; +set directed_read=''_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_optimizer_version=''; +set_directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set optimizer_version=''; +&set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''&; +set directed_read=''&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&optimizer_version=''; +set&directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set optimizer_version=''; +$set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''$; +set directed_read=''$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$optimizer_version=''; +set$directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set optimizer_version=''; +@set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''@; +set directed_read=''@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@optimizer_version=''; +set@directed_read=''; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set optimizer_version=''; +!set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''!; +set directed_read=''!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!optimizer_version=''; +set!directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set optimizer_version=''; +*set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''*; +set directed_read=''*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*optimizer_version=''; +set*directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set optimizer_version=''; +(set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''(; +set directed_read=''(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(optimizer_version=''; +set(directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set optimizer_version=''; +)set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''); +set directed_read=''); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)optimizer_version=''; +set)directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set optimizer_version=''; +-set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''-; +set directed_read=''-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-optimizer_version=''; +set-directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set optimizer_version=''; ++set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''+; +set directed_read=''+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+optimizer_version=''; +set+directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set optimizer_version=''; +-#set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set optimizer_version=''-#; +set directed_read=''-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#optimizer_version=''; +set-#directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set optimizer_version=''; +/set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''/; +set directed_read=''/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/optimizer_version=''; +set/directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set optimizer_version=''; +\set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''\; +set directed_read=''\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\optimizer_version=''; +set\directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set optimizer_version=''; +?set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''?; +set directed_read=''?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?optimizer_version=''; +set?directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set optimizer_version=''; +-/set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''-/; +set directed_read=''-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/optimizer_version=''; +set-/directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set optimizer_version=''; +/#set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''/#; +set directed_read=''/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#optimizer_version=''; +set/#directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set optimizer_version=''; +/-set directed_read=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_version=''/-; +set directed_read=''/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-optimizer_version=''; 
+set/-directed_read=''; NEW_CONNECTION; -set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set optimizer_version='1'; NEW_CONNECTION; -SET OPTIMIZER_STATISTICS_PACKAGE='AUTO_20191128_14_47_22UTC'; +SET OPTIMIZER_VERSION='1'; NEW_CONNECTION; -set optimizer_statistics_package='auto_20191128_14_47_22utc'; +set optimizer_version='1'; NEW_CONNECTION; - set optimizer_statistics_package='auto_20191128_14_47_22UTC'; + set optimizer_version='1'; NEW_CONNECTION; - set optimizer_statistics_package='auto_20191128_14_47_22UTC'; + set optimizer_version='1'; NEW_CONNECTION; -set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set optimizer_version='1'; NEW_CONNECTION; -set optimizer_statistics_package='auto_20191128_14_47_22UTC' ; +set optimizer_version='1' ; NEW_CONNECTION; -set optimizer_statistics_package='auto_20191128_14_47_22UTC' ; +set optimizer_version='1' ; NEW_CONNECTION; -set optimizer_statistics_package='auto_20191128_14_47_22UTC' +set optimizer_version='1' ; NEW_CONNECTION; -set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set optimizer_version='1'; NEW_CONNECTION; -set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set optimizer_version='1'; NEW_CONNECTION; set -optimizer_statistics_package='auto_20191128_14_47_22UTC'; +optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +foo set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC' bar; +set optimizer_version='1' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +%set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'%; +set optimizer_version='1'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set%optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set%optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +_set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'_; +set optimizer_version='1'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set_optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +&set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'&; +set optimizer_version='1'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set&optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +$set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'$; +set optimizer_version='1'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set$optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +@set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'@; +set optimizer_version='1'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set@optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +!set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'!; +set optimizer_version='1'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set!optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +*set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'*; +set optimizer_version='1'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set*optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +(set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'(; +set optimizer_version='1'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set(optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +)set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'); +set optimizer_version='1'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set)optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +-set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'-; +set optimizer_version='1'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set-optimizer_version='1'; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -+set optimizer_statistics_package='auto_20191128_14_47_22UTC'; ++set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'+; +set optimizer_version='1'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set+optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +-#set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'-#; +set optimizer_version='1'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set-#optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +/set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'/; +set optimizer_version='1'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set/optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +\set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'\; +set optimizer_version='1'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set\optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +?set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'?; +set optimizer_version='1'?; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set?optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +-/set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'-/; +set optimizer_version='1'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set-/optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +/#set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'/#; +set optimizer_version='1'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set/#optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set optimizer_statistics_package='auto_20191128_14_47_22UTC'; +/-set optimizer_version='1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='auto_20191128_14_47_22UTC'/-; +set optimizer_version='1'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-optimizer_statistics_package='auto_20191128_14_47_22UTC'; +set/-optimizer_version='1'; NEW_CONNECTION; -set optimizer_statistics_package=''; +set optimizer_version='200'; NEW_CONNECTION; -SET OPTIMIZER_STATISTICS_PACKAGE=''; +SET OPTIMIZER_VERSION='200'; NEW_CONNECTION; -set optimizer_statistics_package=''; +set optimizer_version='200'; NEW_CONNECTION; - set optimizer_statistics_package=''; + set optimizer_version='200'; NEW_CONNECTION; - set optimizer_statistics_package=''; + set optimizer_version='200'; NEW_CONNECTION; -set optimizer_statistics_package=''; +set optimizer_version='200'; NEW_CONNECTION; -set 
optimizer_statistics_package='' ; +set optimizer_version='200' ; NEW_CONNECTION; -set optimizer_statistics_package='' ; +set optimizer_version='200' ; NEW_CONNECTION; -set optimizer_statistics_package='' +set optimizer_version='200' ; NEW_CONNECTION; -set optimizer_statistics_package=''; +set optimizer_version='200'; NEW_CONNECTION; -set optimizer_statistics_package=''; +set optimizer_version='200'; NEW_CONNECTION; set -optimizer_statistics_package=''; +optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set optimizer_statistics_package=''; +foo set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package='' bar; +set optimizer_version='200' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set optimizer_statistics_package=''; +%set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''%; +set optimizer_version='200'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%optimizer_statistics_package=''; +set%optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set optimizer_statistics_package=''; +_set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''_; +set optimizer_version='200'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_optimizer_statistics_package=''; +set_optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set optimizer_statistics_package=''; +&set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''&; +set optimizer_version='200'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&optimizer_statistics_package=''; +set&optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set optimizer_statistics_package=''; +$set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
optimizer_statistics_package=''$; +set optimizer_version='200'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$optimizer_statistics_package=''; +set$optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set optimizer_statistics_package=''; +@set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''@; +set optimizer_version='200'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@optimizer_statistics_package=''; +set@optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set optimizer_statistics_package=''; +!set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''!; +set optimizer_version='200'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!optimizer_statistics_package=''; +set!optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set optimizer_statistics_package=''; +*set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''*; +set optimizer_version='200'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*optimizer_statistics_package=''; +set*optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set optimizer_statistics_package=''; +(set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''(; +set optimizer_version='200'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(optimizer_statistics_package=''; +set(optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set optimizer_statistics_package=''; +)set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''); +set optimizer_version='200'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)optimizer_statistics_package=''; +set)optimizer_version='200'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set optimizer_statistics_package=''; +-set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''-; +set optimizer_version='200'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-optimizer_statistics_package=''; +set-optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set optimizer_statistics_package=''; ++set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''+; +set optimizer_version='200'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+optimizer_statistics_package=''; +set+optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set optimizer_statistics_package=''; +-#set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''-#; +set optimizer_version='200'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#optimizer_statistics_package=''; +set-#optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set optimizer_statistics_package=''; +/set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''/; +set optimizer_version='200'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/optimizer_statistics_package=''; +set/optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set optimizer_statistics_package=''; +\set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''\; +set optimizer_version='200'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\optimizer_statistics_package=''; +set\optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set optimizer_statistics_package=''; +?set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
optimizer_statistics_package=''?; +set optimizer_version='200'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?optimizer_statistics_package=''; +set?optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set optimizer_statistics_package=''; +-/set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''-/; +set optimizer_version='200'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/optimizer_statistics_package=''; +set-/optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set optimizer_statistics_package=''; +/#set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''/#; +set optimizer_version='200'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#optimizer_statistics_package=''; +set/#optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set optimizer_statistics_package=''; +/-set optimizer_version='200'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set optimizer_statistics_package=''/-; +set optimizer_version='200'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-optimizer_statistics_package=''; +set/-optimizer_version='200'; NEW_CONNECTION; -set return_commit_stats = true; +set optimizer_version='LATEST'; NEW_CONNECTION; -SET RETURN_COMMIT_STATS = TRUE; +SET OPTIMIZER_VERSION='LATEST'; NEW_CONNECTION; -set return_commit_stats = true; +set optimizer_version='latest'; NEW_CONNECTION; - set return_commit_stats = true; + set optimizer_version='LATEST'; NEW_CONNECTION; - set return_commit_stats = true; + set optimizer_version='LATEST'; NEW_CONNECTION; -set return_commit_stats = true; +set optimizer_version='LATEST'; NEW_CONNECTION; -set return_commit_stats = true ; +set optimizer_version='LATEST' ; NEW_CONNECTION; -set return_commit_stats = true ; +set optimizer_version='LATEST' ; NEW_CONNECTION; -set return_commit_stats = true 
+set optimizer_version='LATEST' ; NEW_CONNECTION; -set return_commit_stats = true; +set optimizer_version='LATEST'; NEW_CONNECTION; -set return_commit_stats = true; +set optimizer_version='LATEST'; NEW_CONNECTION; set -return_commit_stats -= -true; +optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set return_commit_stats = true; +foo set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true bar; +set optimizer_version='LATEST' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set return_commit_stats = true; +%set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true%; +set optimizer_version='LATEST'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =%true; +set%optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set return_commit_stats = true; +_set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true_; +set optimizer_version='LATEST'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =_true; +set_optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set return_commit_stats = true; +&set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true&; +set optimizer_version='LATEST'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =&true; +set&optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set return_commit_stats = true; +$set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true$; +set optimizer_version='LATEST'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =$true; +set$optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-@set return_commit_stats = true; +@set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true@; +set optimizer_version='LATEST'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =@true; +set@optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set return_commit_stats = true; +!set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true!; +set optimizer_version='LATEST'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =!true; +set!optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set return_commit_stats = true; +*set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true*; +set optimizer_version='LATEST'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =*true; +set*optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set return_commit_stats = true; +(set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true(; +set optimizer_version='LATEST'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =(true; +set(optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set return_commit_stats = true; +)set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true); +set optimizer_version='LATEST'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =)true; +set)optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set return_commit_stats = true; +-set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true-; +set optimizer_version='LATEST'-; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set return_commit_stats =-true; +set-optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set return_commit_stats = true; ++set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true+; +set optimizer_version='LATEST'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =+true; +set+optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set return_commit_stats = true; +-#set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true-#; +set optimizer_version='LATEST'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =-#true; +set-#optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set return_commit_stats = true; +/set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true/; +set optimizer_version='LATEST'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =/true; +set/optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set return_commit_stats = true; +\set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true\; +set optimizer_version='LATEST'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =\true; +set\optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set return_commit_stats = true; +?set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true?; +set optimizer_version='LATEST'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =?true; +set?optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set return_commit_stats = true; +-/set optimizer_version='LATEST'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true-/; +set optimizer_version='LATEST'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =-/true; +set-/optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set return_commit_stats = true; +/#set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true/#; +set optimizer_version='LATEST'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =/#true; +set/#optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set return_commit_stats = true; +/-set optimizer_version='LATEST'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = true/-; +set optimizer_version='LATEST'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =/-true; +set/-optimizer_version='LATEST'; NEW_CONNECTION; -set return_commit_stats = false; +set optimizer_version=''; NEW_CONNECTION; -SET RETURN_COMMIT_STATS = FALSE; +SET OPTIMIZER_VERSION=''; NEW_CONNECTION; -set return_commit_stats = false; +set optimizer_version=''; NEW_CONNECTION; - set return_commit_stats = false; + set optimizer_version=''; NEW_CONNECTION; - set return_commit_stats = false; + set optimizer_version=''; NEW_CONNECTION; -set return_commit_stats = false; +set optimizer_version=''; NEW_CONNECTION; -set return_commit_stats = false ; +set optimizer_version='' ; NEW_CONNECTION; -set return_commit_stats = false ; +set optimizer_version='' ; NEW_CONNECTION; -set return_commit_stats = false +set optimizer_version='' ; NEW_CONNECTION; -set return_commit_stats = false; +set optimizer_version=''; NEW_CONNECTION; -set return_commit_stats = false; +set optimizer_version=''; NEW_CONNECTION; set -return_commit_stats -= -false; +optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set return_commit_stats = false; +foo set 
optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false bar; +set optimizer_version='' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set return_commit_stats = false; +%set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false%; +set optimizer_version=''%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =%false; +set%optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set return_commit_stats = false; +_set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false_; +set optimizer_version=''_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =_false; +set_optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set return_commit_stats = false; +&set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false&; +set optimizer_version=''&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =&false; +set&optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set return_commit_stats = false; +$set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false$; +set optimizer_version=''$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =$false; +set$optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set return_commit_stats = false; +@set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false@; +set optimizer_version=''@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =@false; +set@optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set return_commit_stats = false; +!set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set return_commit_stats = false!; +set optimizer_version=''!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =!false; +set!optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set return_commit_stats = false; +*set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false*; +set optimizer_version=''*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =*false; +set*optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set return_commit_stats = false; +(set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false(; +set optimizer_version=''(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =(false; +set(optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set return_commit_stats = false; +)set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false); +set optimizer_version=''); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =)false; +set)optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set return_commit_stats = false; +-set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false-; +set optimizer_version=''-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =-false; +set-optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set return_commit_stats = false; ++set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false+; +set optimizer_version=''+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =+false; +set+optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set return_commit_stats = false; +-#set 
optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false-#; +set optimizer_version=''-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =-#false; +set-#optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set return_commit_stats = false; +/set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false/; +set optimizer_version=''/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =/false; +set/optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set return_commit_stats = false; +\set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false\; +set optimizer_version=''\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =\false; +set\optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set return_commit_stats = false; +?set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false?; +set optimizer_version=''?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =?false; +set?optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set return_commit_stats = false; +-/set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false-/; +set optimizer_version=''-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =-/false; +set-/optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set return_commit_stats = false; +/#set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false/#; +set optimizer_version=''/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =/#false; +set/#optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -/-set return_commit_stats = false; +/-set optimizer_version=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats = false/-; +set optimizer_version=''/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set return_commit_stats =/-false; +set/-optimizer_version=''; NEW_CONNECTION; -set max_commit_delay=null; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; -SET MAX_COMMIT_DELAY=NULL; +SET OPTIMIZER_STATISTICS_PACKAGE='AUTO_20191128_14_47_22UTC'; NEW_CONNECTION; -set max_commit_delay=null; +set optimizer_statistics_package='auto_20191128_14_47_22utc'; NEW_CONNECTION; - set max_commit_delay=null; + set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; - set max_commit_delay=null; + set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; -set max_commit_delay=null; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; -set max_commit_delay=null ; +set optimizer_statistics_package='auto_20191128_14_47_22UTC' ; NEW_CONNECTION; -set max_commit_delay=null ; +set optimizer_statistics_package='auto_20191128_14_47_22UTC' ; NEW_CONNECTION; -set max_commit_delay=null +set optimizer_statistics_package='auto_20191128_14_47_22UTC' ; NEW_CONNECTION; -set max_commit_delay=null; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; -set max_commit_delay=null; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; set -max_commit_delay=null; +optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set max_commit_delay=null; +foo set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null bar; +set optimizer_statistics_package='auto_20191128_14_47_22UTC' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set max_commit_delay=null; +%set 
optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null%; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%max_commit_delay=null; +set%optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set max_commit_delay=null; +_set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null_; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_max_commit_delay=null; +set_optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set max_commit_delay=null; +&set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null&; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&max_commit_delay=null; +set&optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set max_commit_delay=null; +$set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null$; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$max_commit_delay=null; +set$optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set max_commit_delay=null; +@set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null@; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set@max_commit_delay=null; +set@optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set max_commit_delay=null; +!set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null!; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!max_commit_delay=null; +set!optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set max_commit_delay=null; +*set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null*; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*max_commit_delay=null; +set*optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set max_commit_delay=null; +(set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null(; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(max_commit_delay=null; +set(optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set max_commit_delay=null; +)set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null); +set optimizer_statistics_package='auto_20191128_14_47_22UTC'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)max_commit_delay=null; +set)optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set max_commit_delay=null; +-set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set max_commit_delay=null-; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-max_commit_delay=null; +set-optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set max_commit_delay=null; ++set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null+; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+max_commit_delay=null; +set+optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set max_commit_delay=null; +-#set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null-#; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#max_commit_delay=null; +set-#optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set max_commit_delay=null; +/set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null/; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/max_commit_delay=null; +set/optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set max_commit_delay=null; +\set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null\; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\max_commit_delay=null; +set\optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; 
@EXPECT EXCEPTION INVALID_ARGUMENT -?set max_commit_delay=null; +?set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null?; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?max_commit_delay=null; +set?optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set max_commit_delay=null; +-/set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null-/; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/max_commit_delay=null; +set-/optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set max_commit_delay=null; +/#set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null/#; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#max_commit_delay=null; +set/#optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set max_commit_delay=null; +/-set optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay=null/-; +set optimizer_statistics_package='auto_20191128_14_47_22UTC'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-max_commit_delay=null; +set/-optimizer_statistics_package='auto_20191128_14_47_22UTC'; NEW_CONNECTION; -set max_commit_delay='1s'; +set optimizer_statistics_package=''; NEW_CONNECTION; -SET MAX_COMMIT_DELAY='1S'; +SET OPTIMIZER_STATISTICS_PACKAGE=''; NEW_CONNECTION; -set max_commit_delay='1s'; +set optimizer_statistics_package=''; NEW_CONNECTION; - set 
max_commit_delay='1s'; + set optimizer_statistics_package=''; NEW_CONNECTION; - set max_commit_delay='1s'; + set optimizer_statistics_package=''; NEW_CONNECTION; -set max_commit_delay='1s'; +set optimizer_statistics_package=''; NEW_CONNECTION; -set max_commit_delay='1s' ; +set optimizer_statistics_package='' ; NEW_CONNECTION; -set max_commit_delay='1s' ; +set optimizer_statistics_package='' ; NEW_CONNECTION; -set max_commit_delay='1s' +set optimizer_statistics_package='' ; NEW_CONNECTION; -set max_commit_delay='1s'; +set optimizer_statistics_package=''; NEW_CONNECTION; -set max_commit_delay='1s'; +set optimizer_statistics_package=''; NEW_CONNECTION; set -max_commit_delay='1s'; +optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set max_commit_delay='1s'; +foo set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s' bar; +set optimizer_statistics_package='' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set max_commit_delay='1s'; +%set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'%; +set optimizer_statistics_package=''%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%max_commit_delay='1s'; +set%optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set max_commit_delay='1s'; +_set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'_; +set optimizer_statistics_package=''_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_max_commit_delay='1s'; +set_optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set max_commit_delay='1s'; +&set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'&; +set optimizer_statistics_package=''&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&max_commit_delay='1s'; 
+set&optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set max_commit_delay='1s'; +$set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'$; +set optimizer_statistics_package=''$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$max_commit_delay='1s'; +set$optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set max_commit_delay='1s'; +@set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'@; +set optimizer_statistics_package=''@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@max_commit_delay='1s'; +set@optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set max_commit_delay='1s'; +!set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'!; +set optimizer_statistics_package=''!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!max_commit_delay='1s'; +set!optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set max_commit_delay='1s'; +*set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'*; +set optimizer_statistics_package=''*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*max_commit_delay='1s'; +set*optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set max_commit_delay='1s'; +(set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'(; +set optimizer_statistics_package=''(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(max_commit_delay='1s'; +set(optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set max_commit_delay='1s'; +)set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
max_commit_delay='1s'); +set optimizer_statistics_package=''); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)max_commit_delay='1s'; +set)optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set max_commit_delay='1s'; +-set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'-; +set optimizer_statistics_package=''-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-max_commit_delay='1s'; +set-optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set max_commit_delay='1s'; ++set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'+; +set optimizer_statistics_package=''+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+max_commit_delay='1s'; +set+optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set max_commit_delay='1s'; +-#set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'-#; +set optimizer_statistics_package=''-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#max_commit_delay='1s'; +set-#optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set max_commit_delay='1s'; +/set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'/; +set optimizer_statistics_package=''/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/max_commit_delay='1s'; +set/optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set max_commit_delay='1s'; +\set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'\; +set optimizer_statistics_package=''\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\max_commit_delay='1s'; +set\optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -?set max_commit_delay='1s'; +?set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'?; +set optimizer_statistics_package=''?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?max_commit_delay='1s'; +set?optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set max_commit_delay='1s'; +-/set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'-/; +set optimizer_statistics_package=''-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/max_commit_delay='1s'; +set-/optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set max_commit_delay='1s'; +/#set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'/#; +set optimizer_statistics_package=''/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#max_commit_delay='1s'; +set/#optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set max_commit_delay='1s'; +/-set optimizer_statistics_package=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='1s'/-; +set optimizer_statistics_package=''/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-max_commit_delay='1s'; +set/-optimizer_statistics_package=''; NEW_CONNECTION; -set max_commit_delay='100ms'; +set return_commit_stats = true; NEW_CONNECTION; -SET MAX_COMMIT_DELAY='100MS'; +SET RETURN_COMMIT_STATS = TRUE; NEW_CONNECTION; -set max_commit_delay='100ms'; +set return_commit_stats = true; NEW_CONNECTION; - set max_commit_delay='100ms'; + set return_commit_stats = true; NEW_CONNECTION; - set max_commit_delay='100ms'; + set return_commit_stats = true; NEW_CONNECTION; -set max_commit_delay='100ms'; +set return_commit_stats = true; NEW_CONNECTION; -set max_commit_delay='100ms' ; +set return_commit_stats = true ; NEW_CONNECTION; -set 
max_commit_delay='100ms' ; +set return_commit_stats = true ; NEW_CONNECTION; -set max_commit_delay='100ms' +set return_commit_stats = true ; NEW_CONNECTION; -set max_commit_delay='100ms'; +set return_commit_stats = true; NEW_CONNECTION; -set max_commit_delay='100ms'; +set return_commit_stats = true; NEW_CONNECTION; set -max_commit_delay='100ms'; +return_commit_stats += +true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set max_commit_delay='100ms'; +foo set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms' bar; +set return_commit_stats = true bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set max_commit_delay='100ms'; +%set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'%; +set return_commit_stats = true%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%max_commit_delay='100ms'; +set return_commit_stats =%true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set max_commit_delay='100ms'; +_set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'_; +set return_commit_stats = true_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_max_commit_delay='100ms'; +set return_commit_stats =_true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set max_commit_delay='100ms'; +&set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'&; +set return_commit_stats = true&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&max_commit_delay='100ms'; +set return_commit_stats =&true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set max_commit_delay='100ms'; +$set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'$; +set return_commit_stats = true$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$max_commit_delay='100ms'; +set 
return_commit_stats =$true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set max_commit_delay='100ms'; +@set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'@; +set return_commit_stats = true@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@max_commit_delay='100ms'; +set return_commit_stats =@true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set max_commit_delay='100ms'; +!set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'!; +set return_commit_stats = true!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!max_commit_delay='100ms'; +set return_commit_stats =!true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set max_commit_delay='100ms'; +*set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'*; +set return_commit_stats = true*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*max_commit_delay='100ms'; +set return_commit_stats =*true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set max_commit_delay='100ms'; +(set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'(; +set return_commit_stats = true(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(max_commit_delay='100ms'; +set return_commit_stats =(true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set max_commit_delay='100ms'; +)set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'); +set return_commit_stats = true); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)max_commit_delay='100ms'; +set return_commit_stats =)true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set max_commit_delay='100ms'; +-set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'-; +set return_commit_stats = 
true-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-max_commit_delay='100ms'; +set return_commit_stats =-true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set max_commit_delay='100ms'; ++set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'+; +set return_commit_stats = true+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+max_commit_delay='100ms'; +set return_commit_stats =+true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set max_commit_delay='100ms'; +-#set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'-#; +set return_commit_stats = true-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#max_commit_delay='100ms'; +set return_commit_stats =-#true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set max_commit_delay='100ms'; +/set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'/; +set return_commit_stats = true/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/max_commit_delay='100ms'; +set return_commit_stats =/true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set max_commit_delay='100ms'; +\set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'\; +set return_commit_stats = true\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\max_commit_delay='100ms'; +set return_commit_stats =\true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set max_commit_delay='100ms'; +?set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'?; +set return_commit_stats = true?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?max_commit_delay='100ms'; +set return_commit_stats =?true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set max_commit_delay='100ms'; +-/set return_commit_stats = true; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'-/; +set return_commit_stats = true-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/max_commit_delay='100ms'; +set return_commit_stats =-/true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set max_commit_delay='100ms'; +/#set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'/#; +set return_commit_stats = true/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#max_commit_delay='100ms'; +set return_commit_stats =/#true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set max_commit_delay='100ms'; +/-set return_commit_stats = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='100ms'/-; +set return_commit_stats = true/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-max_commit_delay='100ms'; +set return_commit_stats =/-true; NEW_CONNECTION; -set max_commit_delay='10000us'; +set return_commit_stats = false; NEW_CONNECTION; -SET MAX_COMMIT_DELAY='10000US'; +SET RETURN_COMMIT_STATS = FALSE; NEW_CONNECTION; -set max_commit_delay='10000us'; +set return_commit_stats = false; NEW_CONNECTION; - set max_commit_delay='10000us'; + set return_commit_stats = false; NEW_CONNECTION; - set max_commit_delay='10000us'; + set return_commit_stats = false; NEW_CONNECTION; -set max_commit_delay='10000us'; +set return_commit_stats = false; NEW_CONNECTION; -set max_commit_delay='10000us' ; +set return_commit_stats = false ; NEW_CONNECTION; -set max_commit_delay='10000us' ; +set return_commit_stats = false ; NEW_CONNECTION; -set max_commit_delay='10000us' +set return_commit_stats = false ; NEW_CONNECTION; -set max_commit_delay='10000us'; +set return_commit_stats = false; NEW_CONNECTION; -set max_commit_delay='10000us'; +set return_commit_stats = false; NEW_CONNECTION; set -max_commit_delay='10000us'; +return_commit_stats += +false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-foo set max_commit_delay='10000us'; +foo set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us' bar; +set return_commit_stats = false bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set max_commit_delay='10000us'; +%set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'%; +set return_commit_stats = false%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%max_commit_delay='10000us'; +set return_commit_stats =%false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set max_commit_delay='10000us'; +_set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'_; +set return_commit_stats = false_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_max_commit_delay='10000us'; +set return_commit_stats =_false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set max_commit_delay='10000us'; +&set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'&; +set return_commit_stats = false&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&max_commit_delay='10000us'; +set return_commit_stats =&false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set max_commit_delay='10000us'; +$set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'$; +set return_commit_stats = false$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$max_commit_delay='10000us'; +set return_commit_stats =$false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set max_commit_delay='10000us'; +@set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'@; +set return_commit_stats = false@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@max_commit_delay='10000us'; +set return_commit_stats =@false; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set max_commit_delay='10000us'; +!set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'!; +set return_commit_stats = false!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!max_commit_delay='10000us'; +set return_commit_stats =!false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set max_commit_delay='10000us'; +*set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'*; +set return_commit_stats = false*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*max_commit_delay='10000us'; +set return_commit_stats =*false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set max_commit_delay='10000us'; +(set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'(; +set return_commit_stats = false(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(max_commit_delay='10000us'; +set return_commit_stats =(false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set max_commit_delay='10000us'; +)set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'); +set return_commit_stats = false); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)max_commit_delay='10000us'; +set return_commit_stats =)false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set max_commit_delay='10000us'; +-set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'-; +set return_commit_stats = false-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-max_commit_delay='10000us'; +set return_commit_stats =-false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set max_commit_delay='10000us'; ++set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'+; +set 
return_commit_stats = false+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+max_commit_delay='10000us'; +set return_commit_stats =+false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set max_commit_delay='10000us'; +-#set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'-#; +set return_commit_stats = false-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#max_commit_delay='10000us'; +set return_commit_stats =-#false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set max_commit_delay='10000us'; +/set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'/; +set return_commit_stats = false/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/max_commit_delay='10000us'; +set return_commit_stats =/false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set max_commit_delay='10000us'; +\set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'\; +set return_commit_stats = false\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\max_commit_delay='10000us'; +set return_commit_stats =\false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set max_commit_delay='10000us'; +?set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'?; +set return_commit_stats = false?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?max_commit_delay='10000us'; +set return_commit_stats =?false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set max_commit_delay='10000us'; +-/set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'-/; +set return_commit_stats = false-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/max_commit_delay='10000us'; +set return_commit_stats =-/false; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -/#set max_commit_delay='10000us'; +/#set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'/#; +set return_commit_stats = false/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#max_commit_delay='10000us'; +set return_commit_stats =/#false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set max_commit_delay='10000us'; +/-set return_commit_stats = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='10000us'/-; +set return_commit_stats = false/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-max_commit_delay='10000us'; +set return_commit_stats =/-false; NEW_CONNECTION; -set max_commit_delay='9223372036854775807ns'; +set max_commit_delay=null; NEW_CONNECTION; -SET MAX_COMMIT_DELAY='9223372036854775807NS'; +SET MAX_COMMIT_DELAY=NULL; NEW_CONNECTION; -set max_commit_delay='9223372036854775807ns'; +set max_commit_delay=null; NEW_CONNECTION; - set max_commit_delay='9223372036854775807ns'; + set max_commit_delay=null; NEW_CONNECTION; - set max_commit_delay='9223372036854775807ns'; + set max_commit_delay=null; NEW_CONNECTION; -set max_commit_delay='9223372036854775807ns'; +set max_commit_delay=null; NEW_CONNECTION; -set max_commit_delay='9223372036854775807ns' ; +set max_commit_delay=null ; NEW_CONNECTION; -set max_commit_delay='9223372036854775807ns' ; +set max_commit_delay=null ; NEW_CONNECTION; -set max_commit_delay='9223372036854775807ns' +set max_commit_delay=null ; NEW_CONNECTION; -set max_commit_delay='9223372036854775807ns'; +set max_commit_delay=null; NEW_CONNECTION; -set max_commit_delay='9223372036854775807ns'; +set max_commit_delay=null; NEW_CONNECTION; set -max_commit_delay='9223372036854775807ns'; +max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set max_commit_delay='9223372036854775807ns'; +foo set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
max_commit_delay='9223372036854775807ns' bar; +set max_commit_delay=null bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set max_commit_delay='9223372036854775807ns'; +%set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'%; +set max_commit_delay=null%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%max_commit_delay='9223372036854775807ns'; +set%max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set max_commit_delay='9223372036854775807ns'; +_set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'_; +set max_commit_delay=null_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_max_commit_delay='9223372036854775807ns'; +set_max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set max_commit_delay='9223372036854775807ns'; +&set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'&; +set max_commit_delay=null&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&max_commit_delay='9223372036854775807ns'; +set&max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set max_commit_delay='9223372036854775807ns'; +$set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'$; +set max_commit_delay=null$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$max_commit_delay='9223372036854775807ns'; +set$max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set max_commit_delay='9223372036854775807ns'; +@set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'@; +set max_commit_delay=null@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@max_commit_delay='9223372036854775807ns'; +set@max_commit_delay=null; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set max_commit_delay='9223372036854775807ns'; +!set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'!; +set max_commit_delay=null!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!max_commit_delay='9223372036854775807ns'; +set!max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set max_commit_delay='9223372036854775807ns'; +*set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'*; +set max_commit_delay=null*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*max_commit_delay='9223372036854775807ns'; +set*max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set max_commit_delay='9223372036854775807ns'; +(set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'(; +set max_commit_delay=null(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(max_commit_delay='9223372036854775807ns'; +set(max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set max_commit_delay='9223372036854775807ns'; +)set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'); +set max_commit_delay=null); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)max_commit_delay='9223372036854775807ns'; +set)max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set max_commit_delay='9223372036854775807ns'; +-set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'-; +set max_commit_delay=null-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-max_commit_delay='9223372036854775807ns'; +set-max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set 
max_commit_delay='9223372036854775807ns'; ++set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'+; +set max_commit_delay=null+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+max_commit_delay='9223372036854775807ns'; +set+max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set max_commit_delay='9223372036854775807ns'; +-#set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'-#; +set max_commit_delay=null-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#max_commit_delay='9223372036854775807ns'; +set-#max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set max_commit_delay='9223372036854775807ns'; +/set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'/; +set max_commit_delay=null/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/max_commit_delay='9223372036854775807ns'; +set/max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set max_commit_delay='9223372036854775807ns'; +\set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'\; +set max_commit_delay=null\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\max_commit_delay='9223372036854775807ns'; +set\max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set max_commit_delay='9223372036854775807ns'; +?set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'?; +set max_commit_delay=null?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?max_commit_delay='9223372036854775807ns'; +set?max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set max_commit_delay='9223372036854775807ns'; +-/set max_commit_delay=null; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'-/; +set max_commit_delay=null-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/max_commit_delay='9223372036854775807ns'; +set-/max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set max_commit_delay='9223372036854775807ns'; +/#set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'/#; +set max_commit_delay=null/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#max_commit_delay='9223372036854775807ns'; +set/#max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set max_commit_delay='9223372036854775807ns'; +/-set max_commit_delay=null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_commit_delay='9223372036854775807ns'/-; +set max_commit_delay=null/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-max_commit_delay='9223372036854775807ns'; +set/-max_commit_delay=null; NEW_CONNECTION; -set statement_tag='tag1'; +set max_commit_delay = null; NEW_CONNECTION; -SET STATEMENT_TAG='TAG1'; +SET MAX_COMMIT_DELAY = NULL; NEW_CONNECTION; -set statement_tag='tag1'; +set max_commit_delay = null; NEW_CONNECTION; - set statement_tag='tag1'; + set max_commit_delay = null; NEW_CONNECTION; - set statement_tag='tag1'; + set max_commit_delay = null; NEW_CONNECTION; -set statement_tag='tag1'; +set max_commit_delay = null; NEW_CONNECTION; -set statement_tag='tag1' ; +set max_commit_delay = null ; NEW_CONNECTION; -set statement_tag='tag1' ; +set max_commit_delay = null ; NEW_CONNECTION; -set statement_tag='tag1' +set max_commit_delay = null ; NEW_CONNECTION; -set statement_tag='tag1'; +set max_commit_delay = null; NEW_CONNECTION; -set statement_tag='tag1'; +set max_commit_delay = null; NEW_CONNECTION; set -statement_tag='tag1'; +max_commit_delay += +null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set statement_tag='tag1'; 
+foo set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1' bar; +set max_commit_delay = null bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set statement_tag='tag1'; +%set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'%; +set max_commit_delay = null%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%statement_tag='tag1'; +set max_commit_delay =%null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set statement_tag='tag1'; +_set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'_; +set max_commit_delay = null_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_statement_tag='tag1'; +set max_commit_delay =_null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set statement_tag='tag1'; +&set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'&; +set max_commit_delay = null&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&statement_tag='tag1'; +set max_commit_delay =&null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set statement_tag='tag1'; +$set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'$; +set max_commit_delay = null$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$statement_tag='tag1'; +set max_commit_delay =$null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set statement_tag='tag1'; +@set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'@; +set max_commit_delay = null@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@statement_tag='tag1'; +set max_commit_delay =@null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set statement_tag='tag1'; +!set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'!; +set 
max_commit_delay = null!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!statement_tag='tag1'; +set max_commit_delay =!null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set statement_tag='tag1'; +*set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'*; +set max_commit_delay = null*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*statement_tag='tag1'; +set max_commit_delay =*null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set statement_tag='tag1'; +(set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'(; +set max_commit_delay = null(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(statement_tag='tag1'; +set max_commit_delay =(null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set statement_tag='tag1'; +)set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'); +set max_commit_delay = null); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)statement_tag='tag1'; +set max_commit_delay =)null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set statement_tag='tag1'; +-set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'-; +set max_commit_delay = null-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-statement_tag='tag1'; +set max_commit_delay =-null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set statement_tag='tag1'; ++set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'+; +set max_commit_delay = null+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+statement_tag='tag1'; +set max_commit_delay =+null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set statement_tag='tag1'; +-#set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'-#; +set max_commit_delay = null-#; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#statement_tag='tag1'; +set max_commit_delay =-#null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set statement_tag='tag1'; +/set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'/; +set max_commit_delay = null/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/statement_tag='tag1'; +set max_commit_delay =/null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set statement_tag='tag1'; +\set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'\; +set max_commit_delay = null\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\statement_tag='tag1'; +set max_commit_delay =\null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set statement_tag='tag1'; +?set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'?; +set max_commit_delay = null?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?statement_tag='tag1'; +set max_commit_delay =?null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set statement_tag='tag1'; +-/set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'-/; +set max_commit_delay = null-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/statement_tag='tag1'; +set max_commit_delay =-/null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set statement_tag='tag1'; +/#set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'/#; +set max_commit_delay = null/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#statement_tag='tag1'; +set max_commit_delay =/#null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set statement_tag='tag1'; +/-set max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag1'/-; +set max_commit_delay = null/-; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-statement_tag='tag1'; +set max_commit_delay =/-null; NEW_CONNECTION; -set statement_tag='tag2'; +set max_commit_delay = null ; NEW_CONNECTION; -SET STATEMENT_TAG='TAG2'; +SET MAX_COMMIT_DELAY = NULL ; NEW_CONNECTION; -set statement_tag='tag2'; +set max_commit_delay = null ; NEW_CONNECTION; - set statement_tag='tag2'; + set max_commit_delay = null ; NEW_CONNECTION; - set statement_tag='tag2'; + set max_commit_delay = null ; NEW_CONNECTION; -set statement_tag='tag2'; +set max_commit_delay = null ; NEW_CONNECTION; -set statement_tag='tag2' ; +set max_commit_delay = null ; NEW_CONNECTION; -set statement_tag='tag2' ; +set max_commit_delay = null ; NEW_CONNECTION; -set statement_tag='tag2' +set max_commit_delay = null ; NEW_CONNECTION; -set statement_tag='tag2'; +set max_commit_delay = null ; NEW_CONNECTION; -set statement_tag='tag2'; +set max_commit_delay = null ; NEW_CONNECTION; set -statement_tag='tag2'; +max_commit_delay += +null +; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set statement_tag='tag2'; +foo set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2' bar; +set max_commit_delay = null bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set statement_tag='tag2'; +%set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'%; +set max_commit_delay = null %; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%statement_tag='tag2'; +set max_commit_delay = null%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set statement_tag='tag2'; +_set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'_; +set max_commit_delay = null _; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_statement_tag='tag2'; +set max_commit_delay = null_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set statement_tag='tag2'; +&set max_commit_delay 
= null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'&; +set max_commit_delay = null &; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&statement_tag='tag2'; +set max_commit_delay = null&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set statement_tag='tag2'; +$set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'$; +set max_commit_delay = null $; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$statement_tag='tag2'; +set max_commit_delay = null$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set statement_tag='tag2'; +@set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'@; +set max_commit_delay = null @; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@statement_tag='tag2'; +set max_commit_delay = null@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set statement_tag='tag2'; +!set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'!; +set max_commit_delay = null !; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!statement_tag='tag2'; +set max_commit_delay = null!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set statement_tag='tag2'; +*set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'*; +set max_commit_delay = null *; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*statement_tag='tag2'; +set max_commit_delay = null*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set statement_tag='tag2'; +(set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'(; +set max_commit_delay = null (; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(statement_tag='tag2'; +set max_commit_delay = null(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set statement_tag='tag2'; +)set max_commit_delay = null ; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'); +set max_commit_delay = null ); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)statement_tag='tag2'; +set max_commit_delay = null); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set statement_tag='tag2'; +-set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'-; +set max_commit_delay = null -; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-statement_tag='tag2'; +set max_commit_delay = null-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set statement_tag='tag2'; ++set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'+; +set max_commit_delay = null +; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+statement_tag='tag2'; +set max_commit_delay = null+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set statement_tag='tag2'; +-#set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'-#; +set max_commit_delay = null -#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#statement_tag='tag2'; +set max_commit_delay = null-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set statement_tag='tag2'; +/set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'/; +set max_commit_delay = null /; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/statement_tag='tag2'; +set max_commit_delay = null/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set statement_tag='tag2'; +\set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'\; +set max_commit_delay = null \; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\statement_tag='tag2'; +set max_commit_delay = null\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set statement_tag='tag2'; +?set max_commit_delay = null ; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'?; +set max_commit_delay = null ?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?statement_tag='tag2'; +set max_commit_delay = null?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set statement_tag='tag2'; +-/set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'-/; +set max_commit_delay = null -/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/statement_tag='tag2'; +set max_commit_delay = null-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set statement_tag='tag2'; +/#set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'/#; +set max_commit_delay = null /#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#statement_tag='tag2'; +set max_commit_delay = null/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set statement_tag='tag2'; +/-set max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='tag2'/-; +set max_commit_delay = null /-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-statement_tag='tag2'; +set max_commit_delay = null/-; NEW_CONNECTION; -set statement_tag=''; +set max_commit_delay=1000; NEW_CONNECTION; -SET STATEMENT_TAG=''; +SET MAX_COMMIT_DELAY=1000; NEW_CONNECTION; -set statement_tag=''; +set max_commit_delay=1000; NEW_CONNECTION; - set statement_tag=''; + set max_commit_delay=1000; NEW_CONNECTION; - set statement_tag=''; + set max_commit_delay=1000; NEW_CONNECTION; -set statement_tag=''; +set max_commit_delay=1000; NEW_CONNECTION; -set statement_tag='' ; +set max_commit_delay=1000 ; NEW_CONNECTION; -set statement_tag='' ; +set max_commit_delay=1000 ; NEW_CONNECTION; -set statement_tag='' +set max_commit_delay=1000 ; NEW_CONNECTION; -set statement_tag=''; +set max_commit_delay=1000; NEW_CONNECTION; -set statement_tag=''; +set max_commit_delay=1000; NEW_CONNECTION; set 
-statement_tag=''; +max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set statement_tag=''; +foo set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='' bar; +set max_commit_delay=1000 bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set statement_tag=''; +%set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''%; +set max_commit_delay=1000%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%statement_tag=''; +set%max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set statement_tag=''; +_set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''_; +set max_commit_delay=1000_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_statement_tag=''; +set_max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set statement_tag=''; +&set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''&; +set max_commit_delay=1000&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&statement_tag=''; +set&max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set statement_tag=''; +$set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''$; +set max_commit_delay=1000$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$statement_tag=''; +set$max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set statement_tag=''; +@set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''@; +set max_commit_delay=1000@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@statement_tag=''; +set@max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set statement_tag=''; +!set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''!; +set 
max_commit_delay=1000!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!statement_tag=''; +set!max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set statement_tag=''; +*set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''*; +set max_commit_delay=1000*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*statement_tag=''; +set*max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set statement_tag=''; +(set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''(; +set max_commit_delay=1000(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(statement_tag=''; +set(max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set statement_tag=''; +)set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''); +set max_commit_delay=1000); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)statement_tag=''; +set)max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set statement_tag=''; +-set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''-; +set max_commit_delay=1000-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-statement_tag=''; +set-max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set statement_tag=''; ++set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''+; +set max_commit_delay=1000+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+statement_tag=''; +set+max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set statement_tag=''; +-#set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''-#; +set max_commit_delay=1000-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#statement_tag=''; +set-#max_commit_delay=1000; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set statement_tag=''; +/set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''/; +set max_commit_delay=1000/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/statement_tag=''; +set/max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set statement_tag=''; +\set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''\; +set max_commit_delay=1000\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\statement_tag=''; +set\max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set statement_tag=''; +?set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''?; +set max_commit_delay=1000?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?statement_tag=''; +set?max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set statement_tag=''; +-/set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''-/; +set max_commit_delay=1000-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/statement_tag=''; +set-/max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set statement_tag=''; +/#set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''/#; +set max_commit_delay=1000/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#statement_tag=''; +set/#max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set statement_tag=''; +/-set max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag=''/-; +set max_commit_delay=1000/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-statement_tag=''; +set/-max_commit_delay=1000; NEW_CONNECTION; -set statement_tag='test_tag'; +set max_commit_delay = 1000; NEW_CONNECTION; -SET 
STATEMENT_TAG='TEST_TAG'; +SET MAX_COMMIT_DELAY = 1000; NEW_CONNECTION; -set statement_tag='test_tag'; +set max_commit_delay = 1000; NEW_CONNECTION; - set statement_tag='test_tag'; + set max_commit_delay = 1000; NEW_CONNECTION; - set statement_tag='test_tag'; + set max_commit_delay = 1000; NEW_CONNECTION; -set statement_tag='test_tag'; +set max_commit_delay = 1000; NEW_CONNECTION; -set statement_tag='test_tag' ; +set max_commit_delay = 1000 ; NEW_CONNECTION; -set statement_tag='test_tag' ; +set max_commit_delay = 1000 ; NEW_CONNECTION; -set statement_tag='test_tag' +set max_commit_delay = 1000 ; NEW_CONNECTION; -set statement_tag='test_tag'; +set max_commit_delay = 1000; NEW_CONNECTION; -set statement_tag='test_tag'; +set max_commit_delay = 1000; NEW_CONNECTION; set -statement_tag='test_tag'; +max_commit_delay += +1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set statement_tag='test_tag'; +foo set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag' bar; +set max_commit_delay = 1000 bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set statement_tag='test_tag'; +%set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'%; +set max_commit_delay = 1000%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%statement_tag='test_tag'; +set max_commit_delay =%1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set statement_tag='test_tag'; +_set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'_; +set max_commit_delay = 1000_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_statement_tag='test_tag'; +set max_commit_delay =_1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set statement_tag='test_tag'; +&set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'&; +set max_commit_delay = 1000&; NEW_CONNECTION; 
@EXPECT EXCEPTION INVALID_ARGUMENT -set&statement_tag='test_tag'; +set max_commit_delay =&1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set statement_tag='test_tag'; +$set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'$; +set max_commit_delay = 1000$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$statement_tag='test_tag'; +set max_commit_delay =$1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set statement_tag='test_tag'; +@set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'@; +set max_commit_delay = 1000@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@statement_tag='test_tag'; +set max_commit_delay =@1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set statement_tag='test_tag'; +!set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'!; +set max_commit_delay = 1000!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!statement_tag='test_tag'; +set max_commit_delay =!1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set statement_tag='test_tag'; +*set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'*; +set max_commit_delay = 1000*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*statement_tag='test_tag'; +set max_commit_delay =*1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set statement_tag='test_tag'; +(set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'(; +set max_commit_delay = 1000(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(statement_tag='test_tag'; +set max_commit_delay =(1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set statement_tag='test_tag'; +)set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'); +set 
max_commit_delay = 1000); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)statement_tag='test_tag'; +set max_commit_delay =)1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set statement_tag='test_tag'; +-set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'-; +set max_commit_delay = 1000-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-statement_tag='test_tag'; +set max_commit_delay =-1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set statement_tag='test_tag'; ++set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'+; +set max_commit_delay = 1000+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+statement_tag='test_tag'; +set max_commit_delay =+1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set statement_tag='test_tag'; +-#set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'-#; +set max_commit_delay = 1000-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#statement_tag='test_tag'; +set max_commit_delay =-#1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set statement_tag='test_tag'; +/set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'/; +set max_commit_delay = 1000/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/statement_tag='test_tag'; +set max_commit_delay =/1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set statement_tag='test_tag'; +\set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'\; +set max_commit_delay = 1000\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\statement_tag='test_tag'; +set max_commit_delay =\1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set statement_tag='test_tag'; +?set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set statement_tag='test_tag'?; +set max_commit_delay = 1000?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?statement_tag='test_tag'; +set max_commit_delay =?1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set statement_tag='test_tag'; +-/set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'-/; +set max_commit_delay = 1000-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/statement_tag='test_tag'; +set max_commit_delay =-/1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set statement_tag='test_tag'; +/#set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'/#; +set max_commit_delay = 1000/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#statement_tag='test_tag'; +set max_commit_delay =/#1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set statement_tag='test_tag'; +/-set max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set statement_tag='test_tag'/-; +set max_commit_delay = 1000/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-statement_tag='test_tag'; +set max_commit_delay =/-1000; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='tag1'; +set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; -SET TRANSACTION_TAG='TAG1'; +SET MAX_COMMIT_DELAY = 1000 ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='tag1'; +set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; - set transaction_tag='tag1'; + set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; - set transaction_tag='tag1'; + set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='tag1'; +set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='tag1' ; +set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; -set 
transaction_tag='tag1' ; +set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='tag1' +set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='tag1'; +set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='tag1'; +set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; set -transaction_tag='tag1'; +max_commit_delay += +1000 +; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set transaction_tag='tag1'; +foo set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1' bar; +set max_commit_delay = 1000 bar; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set transaction_tag='tag1'; +%set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'%; +set max_commit_delay = 1000 %; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set%transaction_tag='tag1'; +set max_commit_delay = 1000%; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set transaction_tag='tag1'; +_set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'_; +set max_commit_delay = 1000 _; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set_transaction_tag='tag1'; +set max_commit_delay = 1000_; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set transaction_tag='tag1'; +&set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'&; +set max_commit_delay = 1000 &; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set&transaction_tag='tag1'; +set max_commit_delay = 1000&; NEW_CONNECTION; 
-set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set transaction_tag='tag1'; +$set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'$; +set max_commit_delay = 1000 $; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set$transaction_tag='tag1'; +set max_commit_delay = 1000$; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set transaction_tag='tag1'; +@set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'@; +set max_commit_delay = 1000 @; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set@transaction_tag='tag1'; +set max_commit_delay = 1000@; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set transaction_tag='tag1'; +!set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'!; +set max_commit_delay = 1000 !; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set!transaction_tag='tag1'; +set max_commit_delay = 1000!; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set transaction_tag='tag1'; +*set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'*; +set max_commit_delay = 1000 *; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set*transaction_tag='tag1'; +set max_commit_delay = 1000*; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set transaction_tag='tag1'; +(set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'(; +set max_commit_delay = 1000 (; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set(transaction_tag='tag1'; +set max_commit_delay = 1000(; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set transaction_tag='tag1'; +)set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'); +set max_commit_delay = 1000 ); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set)transaction_tag='tag1'; +set max_commit_delay = 1000); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set transaction_tag='tag1'; +-set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'-; +set max_commit_delay = 1000 -; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-transaction_tag='tag1'; +set max_commit_delay = 1000-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set transaction_tag='tag1'; ++set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'+; +set max_commit_delay = 1000 +; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set+transaction_tag='tag1'; +set max_commit_delay = 1000+; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set transaction_tag='tag1'; +-#set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'-#; +set max_commit_delay = 1000 -#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#transaction_tag='tag1'; +set max_commit_delay = 1000-#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set transaction_tag='tag1'; +/set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'/; +set max_commit_delay = 1000 /; 
NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/transaction_tag='tag1'; +set max_commit_delay = 1000/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set transaction_tag='tag1'; +\set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'\; +set max_commit_delay = 1000 \; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set\transaction_tag='tag1'; +set max_commit_delay = 1000\; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set transaction_tag='tag1'; +?set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'?; +set max_commit_delay = 1000 ?; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set?transaction_tag='tag1'; +set max_commit_delay = 1000?; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set transaction_tag='tag1'; +-/set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'-/; +set max_commit_delay = 1000 -/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/transaction_tag='tag1'; +set max_commit_delay = 1000-/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set transaction_tag='tag1'; +/#set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag1'/#; +set max_commit_delay = 1000 /#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#transaction_tag='tag1'; +set max_commit_delay = 1000/#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set transaction_tag='tag1'; +/-set max_commit_delay = 1000 ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set transaction_tag='tag1'/-; +set max_commit_delay = 1000 /-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-transaction_tag='tag1'; +set max_commit_delay = 1000/-; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='tag2'; +set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; -SET TRANSACTION_TAG='TAG2'; +SET MAX_COMMIT_DELAY='1S'; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='tag2'; +set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; - set transaction_tag='tag2'; + set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; - set transaction_tag='tag2'; + set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='tag2'; +set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='tag2' ; +set max_commit_delay='1s' ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='tag2' ; +set max_commit_delay='1s' ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='tag2' +set max_commit_delay='1s' ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='tag2'; +set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='tag2'; +set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; set -transaction_tag='tag2'; +max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set transaction_tag='tag2'; +foo set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2' bar; +set max_commit_delay='1s' bar; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set transaction_tag='tag2'; +%set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'%; +set max_commit_delay='1s'%; NEW_CONNECTION; -set autocommit = false; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set%transaction_tag='tag2'; +set%max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set transaction_tag='tag2'; +_set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'_; +set max_commit_delay='1s'_; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set_transaction_tag='tag2'; +set_max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set transaction_tag='tag2'; +&set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'&; +set max_commit_delay='1s'&; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set&transaction_tag='tag2'; +set&max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set transaction_tag='tag2'; +$set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'$; +set max_commit_delay='1s'$; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set$transaction_tag='tag2'; +set$max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set transaction_tag='tag2'; +@set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'@; +set max_commit_delay='1s'@; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set@transaction_tag='tag2'; +set@max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set transaction_tag='tag2'; +!set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'!; +set max_commit_delay='1s'!; NEW_CONNECTION; -set autocommit = 
false; @EXPECT EXCEPTION INVALID_ARGUMENT -set!transaction_tag='tag2'; +set!max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set transaction_tag='tag2'; +*set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'*; +set max_commit_delay='1s'*; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set*transaction_tag='tag2'; +set*max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set transaction_tag='tag2'; +(set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'(; +set max_commit_delay='1s'(; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set(transaction_tag='tag2'; +set(max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set transaction_tag='tag2'; +)set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'); +set max_commit_delay='1s'); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set)transaction_tag='tag2'; +set)max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set transaction_tag='tag2'; +-set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'-; +set max_commit_delay='1s'-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-transaction_tag='tag2'; +set-max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set transaction_tag='tag2'; ++set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'+; +set max_commit_delay='1s'+; NEW_CONNECTION; 
-set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set+transaction_tag='tag2'; +set+max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set transaction_tag='tag2'; +-#set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'-#; +set max_commit_delay='1s'-#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#transaction_tag='tag2'; +set-#max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set transaction_tag='tag2'; +/set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'/; +set max_commit_delay='1s'/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/transaction_tag='tag2'; +set/max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set transaction_tag='tag2'; +\set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'\; +set max_commit_delay='1s'\; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set\transaction_tag='tag2'; +set\max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set transaction_tag='tag2'; +?set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'?; +set max_commit_delay='1s'?; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set?transaction_tag='tag2'; +set?max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set transaction_tag='tag2'; +-/set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'-/; +set 
max_commit_delay='1s'-/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/transaction_tag='tag2'; +set-/max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set transaction_tag='tag2'; +/#set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'/#; +set max_commit_delay='1s'/#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#transaction_tag='tag2'; +set/#max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set transaction_tag='tag2'; +/-set max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='tag2'/-; +set max_commit_delay='1s'/-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-transaction_tag='tag2'; +set/-max_commit_delay='1s'; NEW_CONNECTION; -set autocommit = false; -set transaction_tag=''; +set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; -SET TRANSACTION_TAG=''; +SET MAX_COMMIT_DELAY = '1S'; NEW_CONNECTION; -set autocommit = false; -set transaction_tag=''; +set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; - set transaction_tag=''; + set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; - set transaction_tag=''; + set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; -set transaction_tag=''; +set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='' ; +set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='' ; +set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='' +set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag=''; +set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; -set 
transaction_tag=''; +set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; set -transaction_tag=''; +max_commit_delay += +'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set transaction_tag=''; +foo set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='' bar; +set max_commit_delay = '1s' bar; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set transaction_tag=''; +%set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''%; +set max_commit_delay = '1s'%; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set%transaction_tag=''; +set max_commit_delay =%'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set transaction_tag=''; +_set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''_; +set max_commit_delay = '1s'_; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set_transaction_tag=''; +set max_commit_delay =_'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set transaction_tag=''; +&set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''&; +set max_commit_delay = '1s'&; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set&transaction_tag=''; +set max_commit_delay =&'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set transaction_tag=''; +$set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''$; +set max_commit_delay = '1s'$; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set$transaction_tag=''; +set 
max_commit_delay =$'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set transaction_tag=''; +@set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''@; +set max_commit_delay = '1s'@; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set@transaction_tag=''; +set max_commit_delay =@'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set transaction_tag=''; +!set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''!; +set max_commit_delay = '1s'!; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set!transaction_tag=''; +set max_commit_delay =!'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set transaction_tag=''; +*set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''*; +set max_commit_delay = '1s'*; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set*transaction_tag=''; +set max_commit_delay =*'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set transaction_tag=''; +(set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''(; +set max_commit_delay = '1s'(; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set(transaction_tag=''; +set max_commit_delay =('1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set transaction_tag=''; +)set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''); +set max_commit_delay = '1s'); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set)transaction_tag=''; +set 
max_commit_delay =)'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set transaction_tag=''; +-set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''-; +set max_commit_delay = '1s'-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-transaction_tag=''; +set max_commit_delay =-'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set transaction_tag=''; ++set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''+; +set max_commit_delay = '1s'+; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set+transaction_tag=''; +set max_commit_delay =+'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set transaction_tag=''; +-#set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''-#; +set max_commit_delay = '1s'-#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#transaction_tag=''; +set max_commit_delay =-#'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set transaction_tag=''; +/set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''/; +set max_commit_delay = '1s'/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/transaction_tag=''; +set max_commit_delay =/'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set transaction_tag=''; +\set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''\; +set max_commit_delay = '1s'\; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set\transaction_tag=''; +set 
max_commit_delay =\'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set transaction_tag=''; +?set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''?; +set max_commit_delay = '1s'?; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set?transaction_tag=''; +set max_commit_delay =?'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set transaction_tag=''; +-/set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''-/; +set max_commit_delay = '1s'-/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/transaction_tag=''; +set max_commit_delay =-/'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set transaction_tag=''; +/#set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''/#; +set max_commit_delay = '1s'/#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#transaction_tag=''; +set max_commit_delay =/#'1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set transaction_tag=''; +/-set max_commit_delay = '1s'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag=''/-; +set max_commit_delay = '1s'/-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-transaction_tag=''; +set max_commit_delay =/-'1s'; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='test_tag'; +set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; -SET TRANSACTION_TAG='TEST_TAG'; +SET MAX_COMMIT_DELAY = '1S' ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='test_tag'; +set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; - set 
transaction_tag='test_tag'; + set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; - set transaction_tag='test_tag'; + set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='test_tag'; +set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='test_tag' ; +set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='test_tag' ; +set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='test_tag' +set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='test_tag'; +set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; -set transaction_tag='test_tag'; +set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; set -transaction_tag='test_tag'; +max_commit_delay += +'1s' +; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set transaction_tag='test_tag'; +foo set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag' bar; +set max_commit_delay = '1s' bar; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set transaction_tag='test_tag'; +%set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'%; +set max_commit_delay = '1s' %; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set%transaction_tag='test_tag'; +set max_commit_delay = '1s'%; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set transaction_tag='test_tag'; +_set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'_; +set max_commit_delay = '1s' _; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set_transaction_tag='test_tag'; +set max_commit_delay = '1s'_; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set transaction_tag='test_tag'; +&set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'&; +set max_commit_delay = '1s' &; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set&transaction_tag='test_tag'; +set max_commit_delay = '1s'&; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set transaction_tag='test_tag'; +$set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'$; +set max_commit_delay = '1s' $; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set$transaction_tag='test_tag'; +set max_commit_delay = '1s'$; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set transaction_tag='test_tag'; +@set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'@; +set max_commit_delay = '1s' @; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set@transaction_tag='test_tag'; +set max_commit_delay = '1s'@; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set transaction_tag='test_tag'; +!set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'!; +set max_commit_delay = '1s' !; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set!transaction_tag='test_tag'; +set max_commit_delay = '1s'!; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set transaction_tag='test_tag'; +*set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
transaction_tag='test_tag'*; +set max_commit_delay = '1s' *; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set*transaction_tag='test_tag'; +set max_commit_delay = '1s'*; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set transaction_tag='test_tag'; +(set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'(; +set max_commit_delay = '1s' (; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set(transaction_tag='test_tag'; +set max_commit_delay = '1s'(; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set transaction_tag='test_tag'; +)set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'); +set max_commit_delay = '1s' ); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set)transaction_tag='test_tag'; +set max_commit_delay = '1s'); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set transaction_tag='test_tag'; +-set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'-; +set max_commit_delay = '1s' -; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-transaction_tag='test_tag'; +set max_commit_delay = '1s'-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set transaction_tag='test_tag'; ++set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'+; +set max_commit_delay = '1s' +; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set+transaction_tag='test_tag'; +set max_commit_delay = '1s'+; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set 
transaction_tag='test_tag'; +-#set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'-#; +set max_commit_delay = '1s' -#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#transaction_tag='test_tag'; +set max_commit_delay = '1s'-#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set transaction_tag='test_tag'; +/set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'/; +set max_commit_delay = '1s' /; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/transaction_tag='test_tag'; +set max_commit_delay = '1s'/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set transaction_tag='test_tag'; +\set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'\; +set max_commit_delay = '1s' \; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set\transaction_tag='test_tag'; +set max_commit_delay = '1s'\; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set transaction_tag='test_tag'; +?set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'?; +set max_commit_delay = '1s' ?; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set?transaction_tag='test_tag'; +set max_commit_delay = '1s'?; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set transaction_tag='test_tag'; +-/set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'-/; +set max_commit_delay = '1s' -/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set-/transaction_tag='test_tag'; +set max_commit_delay = '1s'-/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set transaction_tag='test_tag'; +/#set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'/#; +set max_commit_delay = '1s' /#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#transaction_tag='test_tag'; +set max_commit_delay = '1s'/#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set transaction_tag='test_tag'; +/-set max_commit_delay = '1s' ; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set transaction_tag='test_tag'/-; +set max_commit_delay = '1s' /-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-transaction_tag='test_tag'; +set max_commit_delay = '1s'/-; NEW_CONNECTION; -set rpc_priority='HIGH'; +set max_commit_delay='100ms'; NEW_CONNECTION; -SET RPC_PRIORITY='HIGH'; +SET MAX_COMMIT_DELAY='100MS'; NEW_CONNECTION; -set rpc_priority='high'; +set max_commit_delay='100ms'; NEW_CONNECTION; - set rpc_priority='HIGH'; + set max_commit_delay='100ms'; NEW_CONNECTION; - set rpc_priority='HIGH'; + set max_commit_delay='100ms'; NEW_CONNECTION; -set rpc_priority='HIGH'; +set max_commit_delay='100ms'; NEW_CONNECTION; -set rpc_priority='HIGH' ; +set max_commit_delay='100ms' ; NEW_CONNECTION; -set rpc_priority='HIGH' ; +set max_commit_delay='100ms' ; NEW_CONNECTION; -set rpc_priority='HIGH' +set max_commit_delay='100ms' ; NEW_CONNECTION; -set rpc_priority='HIGH'; +set max_commit_delay='100ms'; NEW_CONNECTION; -set rpc_priority='HIGH'; +set max_commit_delay='100ms'; NEW_CONNECTION; set -rpc_priority='HIGH'; +max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set rpc_priority='HIGH'; +foo set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH' 
bar; +set max_commit_delay='100ms' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set rpc_priority='HIGH'; +%set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'%; +set max_commit_delay='100ms'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%rpc_priority='HIGH'; +set%max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set rpc_priority='HIGH'; +_set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'_; +set max_commit_delay='100ms'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_rpc_priority='HIGH'; +set_max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set rpc_priority='HIGH'; +&set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'&; +set max_commit_delay='100ms'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&rpc_priority='HIGH'; +set&max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set rpc_priority='HIGH'; +$set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'$; +set max_commit_delay='100ms'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$rpc_priority='HIGH'; +set$max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set rpc_priority='HIGH'; +@set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'@; +set max_commit_delay='100ms'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@rpc_priority='HIGH'; +set@max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set rpc_priority='HIGH'; +!set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'!; +set max_commit_delay='100ms'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!rpc_priority='HIGH'; 
+set!max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set rpc_priority='HIGH'; +*set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'*; +set max_commit_delay='100ms'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*rpc_priority='HIGH'; +set*max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set rpc_priority='HIGH'; +(set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'(; +set max_commit_delay='100ms'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(rpc_priority='HIGH'; +set(max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set rpc_priority='HIGH'; +)set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'); +set max_commit_delay='100ms'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)rpc_priority='HIGH'; +set)max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set rpc_priority='HIGH'; +-set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'-; +set max_commit_delay='100ms'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-rpc_priority='HIGH'; +set-max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set rpc_priority='HIGH'; ++set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'+; +set max_commit_delay='100ms'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+rpc_priority='HIGH'; +set+max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set rpc_priority='HIGH'; +-#set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'-#; +set max_commit_delay='100ms'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#rpc_priority='HIGH'; 
+set-#max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set rpc_priority='HIGH'; +/set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'/; +set max_commit_delay='100ms'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/rpc_priority='HIGH'; +set/max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set rpc_priority='HIGH'; +\set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'\; +set max_commit_delay='100ms'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\rpc_priority='HIGH'; +set\max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set rpc_priority='HIGH'; +?set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'?; +set max_commit_delay='100ms'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?rpc_priority='HIGH'; +set?max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set rpc_priority='HIGH'; +-/set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'-/; +set max_commit_delay='100ms'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/rpc_priority='HIGH'; +set-/max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set rpc_priority='HIGH'; +/#set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'/#; +set max_commit_delay='100ms'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#rpc_priority='HIGH'; +set/#max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set rpc_priority='HIGH'; +/-set max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='HIGH'/-; +set max_commit_delay='100ms'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-rpc_priority='HIGH'; 
+set/-max_commit_delay='100ms'; NEW_CONNECTION; -set rpc_priority='MEDIUM'; +set max_commit_delay='10000us'; NEW_CONNECTION; -SET RPC_PRIORITY='MEDIUM'; +SET MAX_COMMIT_DELAY='10000US'; NEW_CONNECTION; -set rpc_priority='medium'; +set max_commit_delay='10000us'; NEW_CONNECTION; - set rpc_priority='MEDIUM'; + set max_commit_delay='10000us'; NEW_CONNECTION; - set rpc_priority='MEDIUM'; + set max_commit_delay='10000us'; NEW_CONNECTION; -set rpc_priority='MEDIUM'; +set max_commit_delay='10000us'; NEW_CONNECTION; -set rpc_priority='MEDIUM' ; +set max_commit_delay='10000us' ; NEW_CONNECTION; -set rpc_priority='MEDIUM' ; +set max_commit_delay='10000us' ; NEW_CONNECTION; -set rpc_priority='MEDIUM' +set max_commit_delay='10000us' ; NEW_CONNECTION; -set rpc_priority='MEDIUM'; +set max_commit_delay='10000us'; NEW_CONNECTION; -set rpc_priority='MEDIUM'; +set max_commit_delay='10000us'; NEW_CONNECTION; set -rpc_priority='MEDIUM'; +max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set rpc_priority='MEDIUM'; +foo set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM' bar; +set max_commit_delay='10000us' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set rpc_priority='MEDIUM'; +%set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'%; +set max_commit_delay='10000us'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%rpc_priority='MEDIUM'; +set%max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set rpc_priority='MEDIUM'; +_set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'_; +set max_commit_delay='10000us'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_rpc_priority='MEDIUM'; +set_max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set rpc_priority='MEDIUM'; +&set max_commit_delay='10000us'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'&; +set max_commit_delay='10000us'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&rpc_priority='MEDIUM'; +set&max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set rpc_priority='MEDIUM'; +$set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'$; +set max_commit_delay='10000us'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$rpc_priority='MEDIUM'; +set$max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set rpc_priority='MEDIUM'; +@set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'@; +set max_commit_delay='10000us'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@rpc_priority='MEDIUM'; +set@max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set rpc_priority='MEDIUM'; +!set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'!; +set max_commit_delay='10000us'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!rpc_priority='MEDIUM'; +set!max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set rpc_priority='MEDIUM'; +*set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'*; +set max_commit_delay='10000us'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*rpc_priority='MEDIUM'; +set*max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set rpc_priority='MEDIUM'; +(set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'(; +set max_commit_delay='10000us'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(rpc_priority='MEDIUM'; +set(max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set 
rpc_priority='MEDIUM'; +)set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'); +set max_commit_delay='10000us'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)rpc_priority='MEDIUM'; +set)max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set rpc_priority='MEDIUM'; +-set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'-; +set max_commit_delay='10000us'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-rpc_priority='MEDIUM'; +set-max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set rpc_priority='MEDIUM'; ++set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'+; +set max_commit_delay='10000us'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+rpc_priority='MEDIUM'; +set+max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set rpc_priority='MEDIUM'; +-#set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'-#; +set max_commit_delay='10000us'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#rpc_priority='MEDIUM'; +set-#max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set rpc_priority='MEDIUM'; +/set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'/; +set max_commit_delay='10000us'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/rpc_priority='MEDIUM'; +set/max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set rpc_priority='MEDIUM'; +\set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'\; +set max_commit_delay='10000us'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\rpc_priority='MEDIUM'; +set\max_commit_delay='10000us'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set rpc_priority='MEDIUM'; +?set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'?; +set max_commit_delay='10000us'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?rpc_priority='MEDIUM'; +set?max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set rpc_priority='MEDIUM'; +-/set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'-/; +set max_commit_delay='10000us'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/rpc_priority='MEDIUM'; +set-/max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set rpc_priority='MEDIUM'; +/#set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'/#; +set max_commit_delay='10000us'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#rpc_priority='MEDIUM'; +set/#max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set rpc_priority='MEDIUM'; +/-set max_commit_delay='10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='MEDIUM'/-; +set max_commit_delay='10000us'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-rpc_priority='MEDIUM'; +set/-max_commit_delay='10000us'; NEW_CONNECTION; -set rpc_priority='LOW'; +set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; -SET RPC_PRIORITY='LOW'; +SET MAX_COMMIT_DELAY='9223372036854775807NS'; NEW_CONNECTION; -set rpc_priority='low'; +set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; - set rpc_priority='LOW'; + set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; - set rpc_priority='LOW'; + set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; -set rpc_priority='LOW'; +set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; -set rpc_priority='LOW' ; +set max_commit_delay='9223372036854775807ns' ; 
NEW_CONNECTION; -set rpc_priority='LOW' ; +set max_commit_delay='9223372036854775807ns' ; NEW_CONNECTION; -set rpc_priority='LOW' +set max_commit_delay='9223372036854775807ns' ; NEW_CONNECTION; -set rpc_priority='LOW'; +set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; -set rpc_priority='LOW'; +set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; set -rpc_priority='LOW'; +max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set rpc_priority='LOW'; +foo set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW' bar; +set max_commit_delay='9223372036854775807ns' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set rpc_priority='LOW'; +%set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'%; +set max_commit_delay='9223372036854775807ns'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%rpc_priority='LOW'; +set%max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set rpc_priority='LOW'; +_set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'_; +set max_commit_delay='9223372036854775807ns'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_rpc_priority='LOW'; +set_max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set rpc_priority='LOW'; +&set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'&; +set max_commit_delay='9223372036854775807ns'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&rpc_priority='LOW'; +set&max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set rpc_priority='LOW'; +$set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
rpc_priority='LOW'$; +set max_commit_delay='9223372036854775807ns'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$rpc_priority='LOW'; +set$max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set rpc_priority='LOW'; +@set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'@; +set max_commit_delay='9223372036854775807ns'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@rpc_priority='LOW'; +set@max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set rpc_priority='LOW'; +!set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'!; +set max_commit_delay='9223372036854775807ns'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!rpc_priority='LOW'; +set!max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set rpc_priority='LOW'; +*set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'*; +set max_commit_delay='9223372036854775807ns'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*rpc_priority='LOW'; +set*max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set rpc_priority='LOW'; +(set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'(; +set max_commit_delay='9223372036854775807ns'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(rpc_priority='LOW'; +set(max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set rpc_priority='LOW'; +)set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'); +set max_commit_delay='9223372036854775807ns'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set)rpc_priority='LOW'; +set)max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set rpc_priority='LOW'; +-set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'-; +set max_commit_delay='9223372036854775807ns'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-rpc_priority='LOW'; +set-max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set rpc_priority='LOW'; ++set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'+; +set max_commit_delay='9223372036854775807ns'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+rpc_priority='LOW'; +set+max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set rpc_priority='LOW'; +-#set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'-#; +set max_commit_delay='9223372036854775807ns'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#rpc_priority='LOW'; +set-#max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set rpc_priority='LOW'; +/set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'/; +set max_commit_delay='9223372036854775807ns'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/rpc_priority='LOW'; +set/max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set rpc_priority='LOW'; +\set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'\; +set max_commit_delay='9223372036854775807ns'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\rpc_priority='LOW'; +set\max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set 
rpc_priority='LOW'; +?set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'?; +set max_commit_delay='9223372036854775807ns'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?rpc_priority='LOW'; +set?max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set rpc_priority='LOW'; +-/set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'-/; +set max_commit_delay='9223372036854775807ns'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/rpc_priority='LOW'; +set-/max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set rpc_priority='LOW'; +/#set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'/#; +set max_commit_delay='9223372036854775807ns'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#rpc_priority='LOW'; +set/#max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set rpc_priority='LOW'; +/-set max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='LOW'/-; +set max_commit_delay='9223372036854775807ns'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-rpc_priority='LOW'; +set/-max_commit_delay='9223372036854775807ns'; NEW_CONNECTION; -set rpc_priority='NULL'; +set statement_tag='tag1'; NEW_CONNECTION; -SET RPC_PRIORITY='NULL'; +SET STATEMENT_TAG='TAG1'; NEW_CONNECTION; -set rpc_priority='null'; +set statement_tag='tag1'; NEW_CONNECTION; - set rpc_priority='NULL'; + set statement_tag='tag1'; NEW_CONNECTION; - set rpc_priority='NULL'; + set statement_tag='tag1'; NEW_CONNECTION; -set rpc_priority='NULL'; +set statement_tag='tag1'; NEW_CONNECTION; -set rpc_priority='NULL' ; +set statement_tag='tag1' ; NEW_CONNECTION; -set rpc_priority='NULL' ; +set 
statement_tag='tag1' ; NEW_CONNECTION; -set rpc_priority='NULL' +set statement_tag='tag1' ; NEW_CONNECTION; -set rpc_priority='NULL'; +set statement_tag='tag1'; NEW_CONNECTION; -set rpc_priority='NULL'; +set statement_tag='tag1'; NEW_CONNECTION; set -rpc_priority='NULL'; +statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set rpc_priority='NULL'; +foo set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL' bar; +set statement_tag='tag1' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set rpc_priority='NULL'; +%set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'%; +set statement_tag='tag1'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%rpc_priority='NULL'; +set%statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set rpc_priority='NULL'; +_set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'_; +set statement_tag='tag1'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_rpc_priority='NULL'; +set_statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set rpc_priority='NULL'; +&set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'&; +set statement_tag='tag1'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&rpc_priority='NULL'; +set&statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set rpc_priority='NULL'; +$set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'$; +set statement_tag='tag1'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$rpc_priority='NULL'; +set$statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set rpc_priority='NULL'; +@set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'@; +set statement_tag='tag1'@; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@rpc_priority='NULL'; +set@statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set rpc_priority='NULL'; +!set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'!; +set statement_tag='tag1'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!rpc_priority='NULL'; +set!statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set rpc_priority='NULL'; +*set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'*; +set statement_tag='tag1'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*rpc_priority='NULL'; +set*statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set rpc_priority='NULL'; +(set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'(; +set statement_tag='tag1'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(rpc_priority='NULL'; +set(statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set rpc_priority='NULL'; +)set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'); +set statement_tag='tag1'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)rpc_priority='NULL'; +set)statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set rpc_priority='NULL'; +-set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'-; +set statement_tag='tag1'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-rpc_priority='NULL'; +set-statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set rpc_priority='NULL'; ++set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'+; +set statement_tag='tag1'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+rpc_priority='NULL'; +set+statement_tag='tag1'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set rpc_priority='NULL'; +-#set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'-#; +set statement_tag='tag1'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#rpc_priority='NULL'; +set-#statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set rpc_priority='NULL'; +/set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'/; +set statement_tag='tag1'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/rpc_priority='NULL'; +set/statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set rpc_priority='NULL'; +\set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'\; +set statement_tag='tag1'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\rpc_priority='NULL'; +set\statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set rpc_priority='NULL'; +?set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'?; +set statement_tag='tag1'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?rpc_priority='NULL'; +set?statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set rpc_priority='NULL'; +-/set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'-/; +set statement_tag='tag1'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/rpc_priority='NULL'; +set-/statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set rpc_priority='NULL'; +/#set statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'/#; +set statement_tag='tag1'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#rpc_priority='NULL'; +set/#statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set rpc_priority='NULL'; +/-set 
statement_tag='tag1'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set rpc_priority='NULL'/-; +set statement_tag='tag1'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-rpc_priority='NULL'; +set/-statement_tag='tag1'; NEW_CONNECTION; -set savepoint_support='ENABLED'; +set statement_tag='tag2'; NEW_CONNECTION; -SET SAVEPOINT_SUPPORT='ENABLED'; +SET STATEMENT_TAG='TAG2'; NEW_CONNECTION; -set savepoint_support='enabled'; +set statement_tag='tag2'; NEW_CONNECTION; - set savepoint_support='ENABLED'; + set statement_tag='tag2'; NEW_CONNECTION; - set savepoint_support='ENABLED'; + set statement_tag='tag2'; NEW_CONNECTION; -set savepoint_support='ENABLED'; +set statement_tag='tag2'; NEW_CONNECTION; -set savepoint_support='ENABLED' ; +set statement_tag='tag2' ; NEW_CONNECTION; -set savepoint_support='ENABLED' ; +set statement_tag='tag2' ; NEW_CONNECTION; -set savepoint_support='ENABLED' +set statement_tag='tag2' ; NEW_CONNECTION; -set savepoint_support='ENABLED'; +set statement_tag='tag2'; NEW_CONNECTION; -set savepoint_support='ENABLED'; +set statement_tag='tag2'; NEW_CONNECTION; set -savepoint_support='ENABLED'; +statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set savepoint_support='ENABLED'; +foo set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED' bar; +set statement_tag='tag2' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set savepoint_support='ENABLED'; +%set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'%; +set statement_tag='tag2'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%savepoint_support='ENABLED'; +set%statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set savepoint_support='ENABLED'; +_set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'_; +set statement_tag='tag2'_; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set_savepoint_support='ENABLED'; +set_statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set savepoint_support='ENABLED'; +&set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'&; +set statement_tag='tag2'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&savepoint_support='ENABLED'; +set&statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set savepoint_support='ENABLED'; +$set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'$; +set statement_tag='tag2'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$savepoint_support='ENABLED'; +set$statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set savepoint_support='ENABLED'; +@set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'@; +set statement_tag='tag2'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@savepoint_support='ENABLED'; +set@statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set savepoint_support='ENABLED'; +!set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'!; +set statement_tag='tag2'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!savepoint_support='ENABLED'; +set!statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set savepoint_support='ENABLED'; +*set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'*; +set statement_tag='tag2'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*savepoint_support='ENABLED'; +set*statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set savepoint_support='ENABLED'; +(set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'(; +set 
statement_tag='tag2'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(savepoint_support='ENABLED'; +set(statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set savepoint_support='ENABLED'; +)set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'); +set statement_tag='tag2'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)savepoint_support='ENABLED'; +set)statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set savepoint_support='ENABLED'; +-set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'-; +set statement_tag='tag2'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-savepoint_support='ENABLED'; +set-statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set savepoint_support='ENABLED'; ++set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'+; +set statement_tag='tag2'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+savepoint_support='ENABLED'; +set+statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set savepoint_support='ENABLED'; +-#set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'-#; +set statement_tag='tag2'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#savepoint_support='ENABLED'; +set-#statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set savepoint_support='ENABLED'; +/set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'/; +set statement_tag='tag2'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/savepoint_support='ENABLED'; +set/statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set savepoint_support='ENABLED'; +\set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set savepoint_support='ENABLED'\; +set statement_tag='tag2'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\savepoint_support='ENABLED'; +set\statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set savepoint_support='ENABLED'; +?set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'?; +set statement_tag='tag2'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?savepoint_support='ENABLED'; +set?statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set savepoint_support='ENABLED'; +-/set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'-/; +set statement_tag='tag2'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/savepoint_support='ENABLED'; +set-/statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set savepoint_support='ENABLED'; +/#set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'/#; +set statement_tag='tag2'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#savepoint_support='ENABLED'; +set/#statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set savepoint_support='ENABLED'; +/-set statement_tag='tag2'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='ENABLED'/-; +set statement_tag='tag2'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-savepoint_support='ENABLED'; +set/-statement_tag='tag2'; NEW_CONNECTION; -set savepoint_support='FAIL_AFTER_ROLLBACK'; +set statement_tag=''; NEW_CONNECTION; -SET SAVEPOINT_SUPPORT='FAIL_AFTER_ROLLBACK'; +SET STATEMENT_TAG=''; NEW_CONNECTION; -set savepoint_support='fail_after_rollback'; +set statement_tag=''; NEW_CONNECTION; - set savepoint_support='FAIL_AFTER_ROLLBACK'; + set statement_tag=''; NEW_CONNECTION; - set savepoint_support='FAIL_AFTER_ROLLBACK'; + set statement_tag=''; 
NEW_CONNECTION; -set savepoint_support='FAIL_AFTER_ROLLBACK'; +set statement_tag=''; NEW_CONNECTION; -set savepoint_support='FAIL_AFTER_ROLLBACK' ; +set statement_tag='' ; NEW_CONNECTION; -set savepoint_support='FAIL_AFTER_ROLLBACK' ; +set statement_tag='' ; NEW_CONNECTION; -set savepoint_support='FAIL_AFTER_ROLLBACK' +set statement_tag='' ; NEW_CONNECTION; -set savepoint_support='FAIL_AFTER_ROLLBACK'; +set statement_tag=''; NEW_CONNECTION; -set savepoint_support='FAIL_AFTER_ROLLBACK'; +set statement_tag=''; NEW_CONNECTION; set -savepoint_support='FAIL_AFTER_ROLLBACK'; +statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set savepoint_support='FAIL_AFTER_ROLLBACK'; +foo set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK' bar; +set statement_tag='' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set savepoint_support='FAIL_AFTER_ROLLBACK'; +%set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'%; +set statement_tag=''%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%savepoint_support='FAIL_AFTER_ROLLBACK'; +set%statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set savepoint_support='FAIL_AFTER_ROLLBACK'; +_set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'_; +set statement_tag=''_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_savepoint_support='FAIL_AFTER_ROLLBACK'; +set_statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set savepoint_support='FAIL_AFTER_ROLLBACK'; +&set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'&; +set statement_tag=''&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&savepoint_support='FAIL_AFTER_ROLLBACK'; +set&statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set 
savepoint_support='FAIL_AFTER_ROLLBACK'; +$set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'$; +set statement_tag=''$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$savepoint_support='FAIL_AFTER_ROLLBACK'; +set$statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set savepoint_support='FAIL_AFTER_ROLLBACK'; +@set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'@; +set statement_tag=''@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@savepoint_support='FAIL_AFTER_ROLLBACK'; +set@statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set savepoint_support='FAIL_AFTER_ROLLBACK'; +!set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'!; +set statement_tag=''!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!savepoint_support='FAIL_AFTER_ROLLBACK'; +set!statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set savepoint_support='FAIL_AFTER_ROLLBACK'; +*set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'*; +set statement_tag=''*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*savepoint_support='FAIL_AFTER_ROLLBACK'; +set*statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set savepoint_support='FAIL_AFTER_ROLLBACK'; +(set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'(; +set statement_tag=''(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(savepoint_support='FAIL_AFTER_ROLLBACK'; +set(statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set savepoint_support='FAIL_AFTER_ROLLBACK'; +)set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'); +set 
statement_tag=''); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)savepoint_support='FAIL_AFTER_ROLLBACK'; +set)statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set savepoint_support='FAIL_AFTER_ROLLBACK'; +-set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'-; +set statement_tag=''-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-savepoint_support='FAIL_AFTER_ROLLBACK'; +set-statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set savepoint_support='FAIL_AFTER_ROLLBACK'; ++set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'+; +set statement_tag=''+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+savepoint_support='FAIL_AFTER_ROLLBACK'; +set+statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set savepoint_support='FAIL_AFTER_ROLLBACK'; +-#set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'-#; +set statement_tag=''-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#savepoint_support='FAIL_AFTER_ROLLBACK'; +set-#statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set savepoint_support='FAIL_AFTER_ROLLBACK'; +/set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'/; +set statement_tag=''/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/savepoint_support='FAIL_AFTER_ROLLBACK'; +set/statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set savepoint_support='FAIL_AFTER_ROLLBACK'; +\set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'\; +set statement_tag=''\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\savepoint_support='FAIL_AFTER_ROLLBACK'; +set\statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -?set savepoint_support='FAIL_AFTER_ROLLBACK'; +?set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'?; +set statement_tag=''?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?savepoint_support='FAIL_AFTER_ROLLBACK'; +set?statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set savepoint_support='FAIL_AFTER_ROLLBACK'; +-/set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'-/; +set statement_tag=''-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/savepoint_support='FAIL_AFTER_ROLLBACK'; +set-/statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set savepoint_support='FAIL_AFTER_ROLLBACK'; +/#set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'/#; +set statement_tag=''/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#savepoint_support='FAIL_AFTER_ROLLBACK'; +set/#statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set savepoint_support='FAIL_AFTER_ROLLBACK'; +/-set statement_tag=''; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='FAIL_AFTER_ROLLBACK'/-; +set statement_tag=''/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-savepoint_support='FAIL_AFTER_ROLLBACK'; +set/-statement_tag=''; NEW_CONNECTION; -set savepoint_support='DISABLED'; +set statement_tag='test_tag'; NEW_CONNECTION; -SET SAVEPOINT_SUPPORT='DISABLED'; +SET STATEMENT_TAG='TEST_TAG'; NEW_CONNECTION; -set savepoint_support='disabled'; +set statement_tag='test_tag'; NEW_CONNECTION; - set savepoint_support='DISABLED'; + set statement_tag='test_tag'; NEW_CONNECTION; - set savepoint_support='DISABLED'; + set statement_tag='test_tag'; NEW_CONNECTION; -set savepoint_support='DISABLED'; +set statement_tag='test_tag'; NEW_CONNECTION; -set savepoint_support='DISABLED' ; +set 
statement_tag='test_tag' ; NEW_CONNECTION; -set savepoint_support='DISABLED' ; +set statement_tag='test_tag' ; NEW_CONNECTION; -set savepoint_support='DISABLED' +set statement_tag='test_tag' ; NEW_CONNECTION; -set savepoint_support='DISABLED'; +set statement_tag='test_tag'; NEW_CONNECTION; -set savepoint_support='DISABLED'; +set statement_tag='test_tag'; NEW_CONNECTION; set -savepoint_support='DISABLED'; +statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set savepoint_support='DISABLED'; +foo set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED' bar; +set statement_tag='test_tag' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set savepoint_support='DISABLED'; +%set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'%; +set statement_tag='test_tag'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%savepoint_support='DISABLED'; +set%statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set savepoint_support='DISABLED'; +_set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'_; +set statement_tag='test_tag'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_savepoint_support='DISABLED'; +set_statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set savepoint_support='DISABLED'; +&set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'&; +set statement_tag='test_tag'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&savepoint_support='DISABLED'; +set&statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set savepoint_support='DISABLED'; +$set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'$; +set statement_tag='test_tag'$; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$savepoint_support='DISABLED'; +set$statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set savepoint_support='DISABLED'; +@set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'@; +set statement_tag='test_tag'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@savepoint_support='DISABLED'; +set@statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set savepoint_support='DISABLED'; +!set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'!; +set statement_tag='test_tag'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!savepoint_support='DISABLED'; +set!statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set savepoint_support='DISABLED'; +*set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'*; +set statement_tag='test_tag'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*savepoint_support='DISABLED'; +set*statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set savepoint_support='DISABLED'; +(set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'(; +set statement_tag='test_tag'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(savepoint_support='DISABLED'; +set(statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set savepoint_support='DISABLED'; +)set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'); +set statement_tag='test_tag'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)savepoint_support='DISABLED'; +set)statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set savepoint_support='DISABLED'; +-set 
statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'-; +set statement_tag='test_tag'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-savepoint_support='DISABLED'; +set-statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set savepoint_support='DISABLED'; ++set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'+; +set statement_tag='test_tag'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+savepoint_support='DISABLED'; +set+statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set savepoint_support='DISABLED'; +-#set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'-#; +set statement_tag='test_tag'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#savepoint_support='DISABLED'; +set-#statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set savepoint_support='DISABLED'; +/set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'/; +set statement_tag='test_tag'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/savepoint_support='DISABLED'; +set/statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set savepoint_support='DISABLED'; +\set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'\; +set statement_tag='test_tag'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\savepoint_support='DISABLED'; +set\statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set savepoint_support='DISABLED'; +?set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'?; +set statement_tag='test_tag'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set?savepoint_support='DISABLED'; +set?statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set savepoint_support='DISABLED'; +-/set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'-/; +set statement_tag='test_tag'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/savepoint_support='DISABLED'; +set-/statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set savepoint_support='DISABLED'; +/#set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'/#; +set statement_tag='test_tag'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#savepoint_support='DISABLED'; +set/#statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set savepoint_support='DISABLED'; +/-set statement_tag='test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set savepoint_support='DISABLED'/-; +set statement_tag='test_tag'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-savepoint_support='DISABLED'; +set/-statement_tag='test_tag'; NEW_CONNECTION; -set delay_transaction_start_until_first_write = true; +set autocommit = false; +set transaction_tag='tag1'; NEW_CONNECTION; -SET DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = TRUE; +set autocommit = false; +SET TRANSACTION_TAG='TAG1'; NEW_CONNECTION; -set delay_transaction_start_until_first_write = true; +set autocommit = false; +set transaction_tag='tag1'; NEW_CONNECTION; - set delay_transaction_start_until_first_write = true; +set autocommit = false; + set transaction_tag='tag1'; NEW_CONNECTION; - set delay_transaction_start_until_first_write = true; +set autocommit = false; + set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; -set delay_transaction_start_until_first_write = true; +set transaction_tag='tag1'; NEW_CONNECTION; -set delay_transaction_start_until_first_write = true ; +set autocommit = false; 
+set transaction_tag='tag1' ; NEW_CONNECTION; -set delay_transaction_start_until_first_write = true ; +set autocommit = false; +set transaction_tag='tag1' ; NEW_CONNECTION; -set delay_transaction_start_until_first_write = true +set autocommit = false; +set transaction_tag='tag1' ; NEW_CONNECTION; -set delay_transaction_start_until_first_write = true; +set autocommit = false; +set transaction_tag='tag1'; NEW_CONNECTION; -set delay_transaction_start_until_first_write = true; +set autocommit = false; +set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; set -delay_transaction_start_until_first_write -= -true; +transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set delay_transaction_start_until_first_write = true; +foo set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true bar; +set transaction_tag='tag1' bar; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set delay_transaction_start_until_first_write = true; +%set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true%; +set transaction_tag='tag1'%; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =%true; +set%transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set delay_transaction_start_until_first_write = true; +_set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true_; +set transaction_tag='tag1'_; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =_true; +set_transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; 
@EXPECT EXCEPTION INVALID_ARGUMENT -&set delay_transaction_start_until_first_write = true; +&set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true&; +set transaction_tag='tag1'&; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =&true; +set&transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set delay_transaction_start_until_first_write = true; +$set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true$; +set transaction_tag='tag1'$; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =$true; +set$transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set delay_transaction_start_until_first_write = true; +@set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true@; +set transaction_tag='tag1'@; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =@true; +set@transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set delay_transaction_start_until_first_write = true; +!set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true!; +set transaction_tag='tag1'!; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =!true; +set!transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set 
delay_transaction_start_until_first_write = true; +*set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true*; +set transaction_tag='tag1'*; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =*true; +set*transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set delay_transaction_start_until_first_write = true; +(set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true(; +set transaction_tag='tag1'(; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =(true; +set(transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set delay_transaction_start_until_first_write = true; +)set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true); +set transaction_tag='tag1'); NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =)true; +set)transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set delay_transaction_start_until_first_write = true; +-set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true-; +set transaction_tag='tag1'-; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =-true; +set-transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set delay_transaction_start_until_first_write = true; ++set 
transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true+; +set transaction_tag='tag1'+; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =+true; +set+transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set delay_transaction_start_until_first_write = true; +-#set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true-#; +set transaction_tag='tag1'-#; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =-#true; +set-#transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set delay_transaction_start_until_first_write = true; +/set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true/; +set transaction_tag='tag1'/; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =/true; +set/transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set delay_transaction_start_until_first_write = true; +\set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true\; +set transaction_tag='tag1'\; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =\true; +set\transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set delay_transaction_start_until_first_write = true; +?set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = 
false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true?; +set transaction_tag='tag1'?; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =?true; +set?transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set delay_transaction_start_until_first_write = true; +-/set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true-/; +set transaction_tag='tag1'-/; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =-/true; +set-/transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set delay_transaction_start_until_first_write = true; +/#set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true/#; +set transaction_tag='tag1'/#; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =/#true; +set/#transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set delay_transaction_start_until_first_write = true; +/-set transaction_tag='tag1'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = true/-; +set transaction_tag='tag1'/-; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =/-true; +set/-transaction_tag='tag1'; NEW_CONNECTION; -set delay_transaction_start_until_first_write = false; +set autocommit = false; +set transaction_tag='tag2'; NEW_CONNECTION; -SET DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = FALSE; +set autocommit = false; +SET 
TRANSACTION_TAG='TAG2'; NEW_CONNECTION; -set delay_transaction_start_until_first_write = false; +set autocommit = false; +set transaction_tag='tag2'; NEW_CONNECTION; - set delay_transaction_start_until_first_write = false; +set autocommit = false; + set transaction_tag='tag2'; NEW_CONNECTION; - set delay_transaction_start_until_first_write = false; +set autocommit = false; + set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; -set delay_transaction_start_until_first_write = false; +set transaction_tag='tag2'; NEW_CONNECTION; -set delay_transaction_start_until_first_write = false ; +set autocommit = false; +set transaction_tag='tag2' ; NEW_CONNECTION; -set delay_transaction_start_until_first_write = false ; +set autocommit = false; +set transaction_tag='tag2' ; NEW_CONNECTION; -set delay_transaction_start_until_first_write = false +set autocommit = false; +set transaction_tag='tag2' ; NEW_CONNECTION; -set delay_transaction_start_until_first_write = false; +set autocommit = false; +set transaction_tag='tag2'; NEW_CONNECTION; -set delay_transaction_start_until_first_write = false; +set autocommit = false; +set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; set -delay_transaction_start_until_first_write -= -false; +transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set delay_transaction_start_until_first_write = false; +foo set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false bar; +set transaction_tag='tag2' bar; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set delay_transaction_start_until_first_write = false; +%set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false%; +set transaction_tag='tag2'%; NEW_CONNECTION; +set autocommit = false; 
@EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =%false; +set%transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set delay_transaction_start_until_first_write = false; +_set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false_; +set transaction_tag='tag2'_; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =_false; +set_transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set delay_transaction_start_until_first_write = false; +&set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false&; +set transaction_tag='tag2'&; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =&false; +set&transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set delay_transaction_start_until_first_write = false; +$set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false$; +set transaction_tag='tag2'$; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =$false; +set$transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set delay_transaction_start_until_first_write = false; +@set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false@; +set transaction_tag='tag2'@; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
delay_transaction_start_until_first_write =@false; +set@transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set delay_transaction_start_until_first_write = false; +!set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false!; +set transaction_tag='tag2'!; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =!false; +set!transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set delay_transaction_start_until_first_write = false; +*set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false*; +set transaction_tag='tag2'*; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =*false; +set*transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set delay_transaction_start_until_first_write = false; +(set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false(; +set transaction_tag='tag2'(; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =(false; +set(transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set delay_transaction_start_until_first_write = false; +)set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false); +set transaction_tag='tag2'); NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =)false; 
+set)transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set delay_transaction_start_until_first_write = false; +-set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false-; +set transaction_tag='tag2'-; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =-false; +set-transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set delay_transaction_start_until_first_write = false; ++set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false+; +set transaction_tag='tag2'+; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =+false; +set+transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set delay_transaction_start_until_first_write = false; +-#set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false-#; +set transaction_tag='tag2'-#; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =-#false; +set-#transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set delay_transaction_start_until_first_write = false; +/set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false/; +set transaction_tag='tag2'/; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =/false; +set/transaction_tag='tag2'; NEW_CONNECTION; 
+set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set delay_transaction_start_until_first_write = false; +\set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false\; +set transaction_tag='tag2'\; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =\false; +set\transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set delay_transaction_start_until_first_write = false; +?set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false?; +set transaction_tag='tag2'?; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =?false; +set?transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set delay_transaction_start_until_first_write = false; +-/set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false-/; +set transaction_tag='tag2'-/; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =-/false; +set-/transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set delay_transaction_start_until_first_write = false; +/#set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false/#; +set transaction_tag='tag2'/#; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =/#false; +set/#transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT 
EXCEPTION INVALID_ARGUMENT -/-set delay_transaction_start_until_first_write = false; +/-set transaction_tag='tag2'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write = false/-; +set transaction_tag='tag2'/-; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set delay_transaction_start_until_first_write =/-false; +set/-transaction_tag='tag2'; NEW_CONNECTION; -show variable data_boost_enabled; +set autocommit = false; +set transaction_tag=''; NEW_CONNECTION; -SHOW VARIABLE DATA_BOOST_ENABLED; +set autocommit = false; +SET TRANSACTION_TAG=''; NEW_CONNECTION; -show variable data_boost_enabled; +set autocommit = false; +set transaction_tag=''; NEW_CONNECTION; - show variable data_boost_enabled; +set autocommit = false; + set transaction_tag=''; NEW_CONNECTION; - show variable data_boost_enabled; +set autocommit = false; + set transaction_tag=''; NEW_CONNECTION; +set autocommit = false; -show variable data_boost_enabled; +set transaction_tag=''; NEW_CONNECTION; -show variable data_boost_enabled ; +set autocommit = false; +set transaction_tag='' ; NEW_CONNECTION; -show variable data_boost_enabled ; +set autocommit = false; +set transaction_tag='' ; NEW_CONNECTION; -show variable data_boost_enabled +set autocommit = false; +set transaction_tag='' ; NEW_CONNECTION; -show variable data_boost_enabled; +set autocommit = false; +set transaction_tag=''; NEW_CONNECTION; -show variable data_boost_enabled; +set autocommit = false; +set transaction_tag=''; NEW_CONNECTION; -show -variable -data_boost_enabled; +set autocommit = false; +set +transaction_tag=''; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo show variable data_boost_enabled; -NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled bar; +foo set transaction_tag=''; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%show variable 
data_boost_enabled; +set transaction_tag='' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set 
transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''/; 
+NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag=''/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +SET TRANSACTION_TAG='TEST_TAG'; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; + set transaction_tag='test_tag'; +NEW_CONNECTION; +set 
autocommit = false; + set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; + + + +set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='test_tag' ; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='test_tag' ; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='test_tag' + +; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +set +transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set transaction_tag='test_tag'$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = 
false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set transaction_tag='test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set transaction_tag='test_tag'/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-transaction_tag='test_tag'; +NEW_CONNECTION; +set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +SET EXCLUDE_TXN_FROM_CHANGE_STREAMS = TRUE; +NEW_CONNECTION; +set exclude_txn_from_change_streams = true; +NEW_CONNECTION; + set exclude_txn_from_change_streams = true; +NEW_CONNECTION; + set exclude_txn_from_change_streams = true; +NEW_CONNECTION; + + + +set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +set exclude_txn_from_change_streams = true ; +NEW_CONNECTION; +set exclude_txn_from_change_streams = true ; +NEW_CONNECTION; +set exclude_txn_from_change_streams = true + +; +NEW_CONNECTION; +set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +set +exclude_txn_from_change_streams += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
exclude_txn_from_change_streams = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set 
exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
exclude_txn_from_change_streams =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =/-true; +NEW_CONNECTION; +set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +SET EXCLUDE_TXN_FROM_CHANGE_STREAMS = FALSE; +NEW_CONNECTION; +set exclude_txn_from_change_streams = false; +NEW_CONNECTION; + set exclude_txn_from_change_streams = false; +NEW_CONNECTION; + set exclude_txn_from_change_streams = false; +NEW_CONNECTION; + + + +set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +set exclude_txn_from_change_streams = false ; +NEW_CONNECTION; +set exclude_txn_from_change_streams = false ; +NEW_CONNECTION; +set exclude_txn_from_change_streams = false + +; +NEW_CONNECTION; +set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +set +exclude_txn_from_change_streams += 
+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams 
=!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = 
false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set exclude_txn_from_change_streams = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set exclude_txn_from_change_streams =/-false; +NEW_CONNECTION; +set rpc_priority='HIGH'; +NEW_CONNECTION; +SET RPC_PRIORITY='HIGH'; +NEW_CONNECTION; +set rpc_priority='high'; +NEW_CONNECTION; + set rpc_priority='HIGH'; +NEW_CONNECTION; + set rpc_priority='HIGH'; +NEW_CONNECTION; + + + +set rpc_priority='HIGH'; +NEW_CONNECTION; +set rpc_priority='HIGH' ; +NEW_CONNECTION; +set rpc_priority='HIGH' ; +NEW_CONNECTION; +set 
rpc_priority='HIGH' + +; +NEW_CONNECTION; +set rpc_priority='HIGH'; +NEW_CONNECTION; +set rpc_priority='HIGH'; +NEW_CONNECTION; +set +rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*rpc_priority='HIGH'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set rpc_priority='HIGH'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set rpc_priority='HIGH'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='HIGH'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-rpc_priority='HIGH'; +NEW_CONNECTION; +set rpc_priority='MEDIUM'; +NEW_CONNECTION; +SET RPC_PRIORITY='MEDIUM'; +NEW_CONNECTION; +set rpc_priority='medium'; +NEW_CONNECTION; + set rpc_priority='MEDIUM'; +NEW_CONNECTION; + set rpc_priority='MEDIUM'; +NEW_CONNECTION; + + + +set rpc_priority='MEDIUM'; +NEW_CONNECTION; +set rpc_priority='MEDIUM' ; +NEW_CONNECTION; +set rpc_priority='MEDIUM' ; +NEW_CONNECTION; +set rpc_priority='MEDIUM' + +; +NEW_CONNECTION; +set rpc_priority='MEDIUM'; +NEW_CONNECTION; +set rpc_priority='MEDIUM'; +NEW_CONNECTION; +set +rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
rpc_priority='MEDIUM'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'+; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set rpc_priority='MEDIUM'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='MEDIUM'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-rpc_priority='MEDIUM'; +NEW_CONNECTION; +set rpc_priority='LOW'; +NEW_CONNECTION; +SET RPC_PRIORITY='LOW'; +NEW_CONNECTION; +set rpc_priority='low'; +NEW_CONNECTION; + set rpc_priority='LOW'; +NEW_CONNECTION; + set 
rpc_priority='LOW'; +NEW_CONNECTION; + + + +set rpc_priority='LOW'; +NEW_CONNECTION; +set rpc_priority='LOW' ; +NEW_CONNECTION; +set rpc_priority='LOW' ; +NEW_CONNECTION; +set rpc_priority='LOW' + +; +NEW_CONNECTION; +set rpc_priority='LOW'; +NEW_CONNECTION; +set rpc_priority='LOW'; +NEW_CONNECTION; +set +rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set rpc_priority='LOW'; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'?; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set?rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set rpc_priority='LOW'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='LOW'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-rpc_priority='LOW'; +NEW_CONNECTION; +set rpc_priority='NULL'; +NEW_CONNECTION; +SET RPC_PRIORITY='NULL'; +NEW_CONNECTION; +set rpc_priority='null'; +NEW_CONNECTION; + set rpc_priority='NULL'; +NEW_CONNECTION; + set rpc_priority='NULL'; +NEW_CONNECTION; + + + +set rpc_priority='NULL'; +NEW_CONNECTION; +set rpc_priority='NULL' ; +NEW_CONNECTION; +set rpc_priority='NULL' ; +NEW_CONNECTION; +set rpc_priority='NULL' + +; +NEW_CONNECTION; +set rpc_priority='NULL'; +NEW_CONNECTION; +set rpc_priority='NULL'; +NEW_CONNECTION; +set +rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+&set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
rpc_priority='NULL'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set rpc_priority='NULL'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set rpc_priority='NULL'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-rpc_priority='NULL'; +NEW_CONNECTION; +set savepoint_support='ENABLED'; +NEW_CONNECTION; +SET SAVEPOINT_SUPPORT='ENABLED'; +NEW_CONNECTION; +set savepoint_support='enabled'; +NEW_CONNECTION; + set savepoint_support='ENABLED'; 
+NEW_CONNECTION; + set savepoint_support='ENABLED'; +NEW_CONNECTION; + + + +set savepoint_support='ENABLED'; +NEW_CONNECTION; +set savepoint_support='ENABLED' ; +NEW_CONNECTION; +set savepoint_support='ENABLED' ; +NEW_CONNECTION; +set savepoint_support='ENABLED' + +; +NEW_CONNECTION; +set savepoint_support='ENABLED'; +NEW_CONNECTION; +set savepoint_support='ENABLED'; +NEW_CONNECTION; +set +savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set 
savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'/; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set/savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set savepoint_support='ENABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='ENABLED'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-savepoint_support='ENABLED'; +NEW_CONNECTION; +set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +SET SAVEPOINT_SUPPORT='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set savepoint_support='fail_after_rollback'; +NEW_CONNECTION; + set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; + set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; + + + +set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set savepoint_support='FAIL_AFTER_ROLLBACK' ; +NEW_CONNECTION; +set savepoint_support='FAIL_AFTER_ROLLBACK' ; +NEW_CONNECTION; +set savepoint_support='FAIL_AFTER_ROLLBACK' + +; +NEW_CONNECTION; +set 
savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set +savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +/set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='FAIL_AFTER_ROLLBACK'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-savepoint_support='FAIL_AFTER_ROLLBACK'; +NEW_CONNECTION; +set savepoint_support='DISABLED'; +NEW_CONNECTION; +SET SAVEPOINT_SUPPORT='DISABLED'; +NEW_CONNECTION; +set savepoint_support='disabled'; +NEW_CONNECTION; + set savepoint_support='DISABLED'; 
+NEW_CONNECTION; + set savepoint_support='DISABLED'; +NEW_CONNECTION; + + + +set savepoint_support='DISABLED'; +NEW_CONNECTION; +set savepoint_support='DISABLED' ; +NEW_CONNECTION; +set savepoint_support='DISABLED' ; +NEW_CONNECTION; +set savepoint_support='DISABLED' + +; +NEW_CONNECTION; +set savepoint_support='DISABLED'; +NEW_CONNECTION; +set savepoint_support='DISABLED'; +NEW_CONNECTION; +set +savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+!set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'/; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set savepoint_support='DISABLED'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set savepoint_support='DISABLED'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-savepoint_support='DISABLED'; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +SET DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = TRUE; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; + set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; + set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; + + + +set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = true ; +NEW_CONNECTION; +set 
delay_transaction_start_until_first_write = true ; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = true + +; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +set +delay_transaction_start_until_first_write += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
delay_transaction_start_until_first_write = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
delay_transaction_start_until_first_write = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set delay_transaction_start_until_first_write = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =/-true; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +SET DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = FALSE; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; + set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; + set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; + + + +set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = false ; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = false ; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = false + +; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +set +delay_transaction_start_until_first_write += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set 
delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set 
delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set 
delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set delay_transaction_start_until_first_write = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set delay_transaction_start_until_first_write =/-false; +NEW_CONNECTION; +set keep_transaction_alive = true; +NEW_CONNECTION; +SET KEEP_TRANSACTION_ALIVE = TRUE; +NEW_CONNECTION; +set keep_transaction_alive = true; +NEW_CONNECTION; + set keep_transaction_alive = true; +NEW_CONNECTION; + set keep_transaction_alive = true; +NEW_CONNECTION; + + + +set keep_transaction_alive = true; +NEW_CONNECTION; +set keep_transaction_alive = true ; +NEW_CONNECTION; +set 
keep_transaction_alive = true ; +NEW_CONNECTION; +set keep_transaction_alive = true + +; +NEW_CONNECTION; +set keep_transaction_alive = true; +NEW_CONNECTION; +set keep_transaction_alive = true; +NEW_CONNECTION; +set +keep_transaction_alive += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set keep_transaction_alive =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set 
keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set keep_transaction_alive = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =/-true; +NEW_CONNECTION; +set keep_transaction_alive = false; +NEW_CONNECTION; +SET KEEP_TRANSACTION_ALIVE = FALSE; +NEW_CONNECTION; +set keep_transaction_alive = false; +NEW_CONNECTION; + set keep_transaction_alive = false; +NEW_CONNECTION; + set keep_transaction_alive = false; +NEW_CONNECTION; + + + +set keep_transaction_alive = false; +NEW_CONNECTION; +set keep_transaction_alive = false ; +NEW_CONNECTION; +set keep_transaction_alive = false ; +NEW_CONNECTION; +set keep_transaction_alive = false + +; +NEW_CONNECTION; +set keep_transaction_alive = false; +NEW_CONNECTION; +set keep_transaction_alive = false; +NEW_CONNECTION; +set +keep_transaction_alive += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +foo set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false*; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =\false; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +?set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set keep_transaction_alive = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set keep_transaction_alive =/-false; +NEW_CONNECTION; +show variable data_boost_enabled; +NEW_CONNECTION; +SHOW VARIABLE DATA_BOOST_ENABLED; +NEW_CONNECTION; +show variable data_boost_enabled; +NEW_CONNECTION; + show variable data_boost_enabled; +NEW_CONNECTION; + show variable data_boost_enabled; +NEW_CONNECTION; + + + +show variable data_boost_enabled; +NEW_CONNECTION; +show variable data_boost_enabled ; +NEW_CONNECTION; +show variable data_boost_enabled ; +NEW_CONNECTION; +show variable data_boost_enabled + +; +NEW_CONNECTION; +show variable data_boost_enabled; +NEW_CONNECTION; +show variable data_boost_enabled; +NEW_CONNECTION; +show +variable +data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable data_boost_enabled; +NEW_CONNECTION; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(data_boost_enabled; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +)show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled-/; +NEW_CONNECTION; +@EXPECT 
EXCEPTION UNIMPLEMENTED +show variable-/data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable data_boost_enabled; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable data_boost_enabled/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-data_boost_enabled; +NEW_CONNECTION; +set data_boost_enabled = true; +NEW_CONNECTION; +SET DATA_BOOST_ENABLED = TRUE; +NEW_CONNECTION; +set data_boost_enabled = true; +NEW_CONNECTION; + set data_boost_enabled = true; +NEW_CONNECTION; + set data_boost_enabled = true; +NEW_CONNECTION; + + + +set data_boost_enabled = true; +NEW_CONNECTION; +set data_boost_enabled = true ; +NEW_CONNECTION; +set data_boost_enabled = true ; +NEW_CONNECTION; +set data_boost_enabled = true + +; +NEW_CONNECTION; +set data_boost_enabled = true; +NEW_CONNECTION; +set data_boost_enabled = true; +NEW_CONNECTION; +set +data_boost_enabled += +true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =%true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =_true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =&true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
++set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set data_boost_enabled = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
data_boost_enabled =/-true; +NEW_CONNECTION; +set data_boost_enabled = false; +NEW_CONNECTION; +SET DATA_BOOST_ENABLED = FALSE; +NEW_CONNECTION; +set data_boost_enabled = false; +NEW_CONNECTION; + set data_boost_enabled = false; +NEW_CONNECTION; + set data_boost_enabled = false; +NEW_CONNECTION; + + + +set data_boost_enabled = false; +NEW_CONNECTION; +set data_boost_enabled = false ; +NEW_CONNECTION; +set data_boost_enabled = false ; +NEW_CONNECTION; +set data_boost_enabled = false + +; +NEW_CONNECTION; +set data_boost_enabled = false; +NEW_CONNECTION; +set data_boost_enabled = false; +NEW_CONNECTION; +set +data_boost_enabled += +false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =%false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =_false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =&false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =$false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set data_boost_enabled = false@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =@false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =!false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =*false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =(false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =)false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =-false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =+false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =-#false; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =\false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =?false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =-/false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =/#false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set data_boost_enabled = false; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled = false/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set data_boost_enabled =/-false; +NEW_CONNECTION; +show variable auto_partition_mode; +NEW_CONNECTION; +SHOW VARIABLE AUTO_PARTITION_MODE; +NEW_CONNECTION; +show variable auto_partition_mode; +NEW_CONNECTION; + show variable auto_partition_mode; +NEW_CONNECTION; + show variable auto_partition_mode; +NEW_CONNECTION; + + + +show variable auto_partition_mode; +NEW_CONNECTION; +show variable auto_partition_mode ; +NEW_CONNECTION; +show variable auto_partition_mode ; +NEW_CONNECTION; +show variable 
auto_partition_mode + +; +NEW_CONNECTION; +show variable auto_partition_mode; +NEW_CONNECTION; +show variable auto_partition_mode; +NEW_CONNECTION; +show +variable +auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+*show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable auto_partition_mode+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+auto_partition_mode; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable auto_partition_mode; NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled%; +show variable auto_partition_mode-#; NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED -show variable%data_boost_enabled; +show variable-#auto_partition_mode; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_show variable data_boost_enabled; +/show variable auto_partition_mode; NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled_; +show variable auto_partition_mode/; NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED -show variable_data_boost_enabled; +show variable/auto_partition_mode; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -&show variable data_boost_enabled; +\show variable auto_partition_mode; NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled&; +show variable auto_partition_mode\; NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED -show variable&data_boost_enabled; +show variable\auto_partition_mode; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$show variable data_boost_enabled; +?show variable auto_partition_mode; NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled$; +show variable auto_partition_mode?; NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED -show variable$data_boost_enabled; +show variable?auto_partition_mode; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@show variable data_boost_enabled; +-/show variable auto_partition_mode; NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled@; +show variable auto_partition_mode-/; NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED -show variable@data_boost_enabled; +show variable-/auto_partition_mode; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!show variable data_boost_enabled; +/#show variable auto_partition_mode; NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled!; +show variable auto_partition_mode/#; NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED -show variable!data_boost_enabled; +show variable/#auto_partition_mode; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*show variable data_boost_enabled; +/-show variable auto_partition_mode; NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled*; +show variable auto_partition_mode/-; NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED -show variable*data_boost_enabled; +show variable/-auto_partition_mode; +NEW_CONNECTION; +set auto_partition_mode = true; +NEW_CONNECTION; +SET AUTO_PARTITION_MODE = TRUE; +NEW_CONNECTION; +set auto_partition_mode = true; +NEW_CONNECTION; + set auto_partition_mode = true; 
+NEW_CONNECTION; + set auto_partition_mode = true; +NEW_CONNECTION; + + + +set auto_partition_mode = true; +NEW_CONNECTION; +set auto_partition_mode = true ; +NEW_CONNECTION; +set auto_partition_mode = true ; +NEW_CONNECTION; +set auto_partition_mode = true + +; +NEW_CONNECTION; +set auto_partition_mode = true; +NEW_CONNECTION; +set auto_partition_mode = true; +NEW_CONNECTION; +set +auto_partition_mode += +true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(show variable data_boost_enabled; +foo set auto_partition_mode = true; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled(; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true bar; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable(data_boost_enabled; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set auto_partition_mode = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)show variable data_boost_enabled; +set auto_partition_mode = true%; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled); +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =%true; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable)data_boost_enabled; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set auto_partition_mode = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --show variable data_boost_enabled; +set auto_partition_mode = true_; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled-; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =_true; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable-data_boost_enabled; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set auto_partition_mode = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+show variable data_boost_enabled; +set auto_partition_mode = true&; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled+; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =&true; 
NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable+data_boost_enabled; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set auto_partition_mode = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#show variable data_boost_enabled; +set auto_partition_mode = true$; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled-#; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =$true; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable-#data_boost_enabled; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set auto_partition_mode = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/show variable data_boost_enabled; +set auto_partition_mode = true@; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled/; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =@true; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable/data_boost_enabled; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set auto_partition_mode = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\show variable data_boost_enabled; +set auto_partition_mode = true!; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled\; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =!true; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable\data_boost_enabled; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set auto_partition_mode = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?show variable data_boost_enabled; +set auto_partition_mode = true*; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled?; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =*true; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable?data_boost_enabled; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set auto_partition_mode = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/show variable data_boost_enabled; +set auto_partition_mode = true(; 
NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled-/; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =(true; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable-/data_boost_enabled; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set auto_partition_mode = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#show variable data_boost_enabled; +set auto_partition_mode = true); NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled/#; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =)true; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable/#data_boost_enabled; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set auto_partition_mode = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-show variable data_boost_enabled; +set auto_partition_mode = true-; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable data_boost_enabled/-; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =-true; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable/-data_boost_enabled; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set auto_partition_mode = true; NEW_CONNECTION; -set data_boost_enabled = true; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true+; NEW_CONNECTION; -SET DATA_BOOST_ENABLED = TRUE; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =+true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set 
auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set auto_partition_mode = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set auto_partition_mode =/-true; +NEW_CONNECTION; +set auto_partition_mode = false; +NEW_CONNECTION; +SET AUTO_PARTITION_MODE = FALSE; NEW_CONNECTION; -set data_boost_enabled = true; +set auto_partition_mode = false; NEW_CONNECTION; - set data_boost_enabled = true; + set auto_partition_mode = false; NEW_CONNECTION; - set data_boost_enabled = true; + set auto_partition_mode = false; NEW_CONNECTION; -set data_boost_enabled = true; +set auto_partition_mode = false; NEW_CONNECTION; -set data_boost_enabled = true ; +set auto_partition_mode = false ; NEW_CONNECTION; -set data_boost_enabled = true ; +set auto_partition_mode = false ; NEW_CONNECTION; -set data_boost_enabled = true +set auto_partition_mode = false ; NEW_CONNECTION; -set data_boost_enabled = true; +set 
auto_partition_mode = false; NEW_CONNECTION; -set data_boost_enabled = true; +set auto_partition_mode = false; NEW_CONNECTION; set -data_boost_enabled +auto_partition_mode = -true; +false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set data_boost_enabled = true; +foo set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true bar; +set auto_partition_mode = false bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set data_boost_enabled = true; +%set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true%; +set auto_partition_mode = false%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =%true; +set auto_partition_mode =%false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set data_boost_enabled = true; +_set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true_; +set auto_partition_mode = false_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =_true; +set auto_partition_mode =_false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set data_boost_enabled = true; +&set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true&; +set auto_partition_mode = false&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =&true; +set auto_partition_mode =&false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set data_boost_enabled = true; +$set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true$; +set auto_partition_mode = false$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =$true; +set auto_partition_mode =$false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set data_boost_enabled = true; +@set auto_partition_mode = false; NEW_CONNECTION; 
@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true@; +set auto_partition_mode = false@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =@true; +set auto_partition_mode =@false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set data_boost_enabled = true; +!set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true!; +set auto_partition_mode = false!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =!true; +set auto_partition_mode =!false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set data_boost_enabled = true; +*set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true*; +set auto_partition_mode = false*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =*true; +set auto_partition_mode =*false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set data_boost_enabled = true; +(set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true(; +set auto_partition_mode = false(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =(true; +set auto_partition_mode =(false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set data_boost_enabled = true; +)set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true); +set auto_partition_mode = false); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =)true; +set auto_partition_mode =)false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set data_boost_enabled = true; +-set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true-; +set auto_partition_mode = false-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =-true; +set auto_partition_mode =-false; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set data_boost_enabled = true; ++set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true+; +set auto_partition_mode = false+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =+true; +set auto_partition_mode =+false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set data_boost_enabled = true; +-#set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true-#; +set auto_partition_mode = false-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =-#true; +set auto_partition_mode =-#false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set data_boost_enabled = true; +/set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true/; +set auto_partition_mode = false/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =/true; +set auto_partition_mode =/false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set data_boost_enabled = true; +\set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true\; +set auto_partition_mode = false\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =\true; +set auto_partition_mode =\false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set data_boost_enabled = true; +?set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true?; +set auto_partition_mode = false?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =?true; +set auto_partition_mode =?false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set data_boost_enabled = true; +-/set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true-/; +set 
auto_partition_mode = false-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =-/true; +set auto_partition_mode =-/false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set data_boost_enabled = true; +/#set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true/#; +set auto_partition_mode = false/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =/#true; +set auto_partition_mode =/#false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set data_boost_enabled = true; +/-set auto_partition_mode = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = true/-; +set auto_partition_mode = false/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =/-true; +set auto_partition_mode =/-false; NEW_CONNECTION; -set data_boost_enabled = false; +show variable max_partitions; NEW_CONNECTION; -SET DATA_BOOST_ENABLED = FALSE; +SHOW VARIABLE MAX_PARTITIONS; NEW_CONNECTION; -set data_boost_enabled = false; +show variable max_partitions; NEW_CONNECTION; - set data_boost_enabled = false; + show variable max_partitions; NEW_CONNECTION; - set data_boost_enabled = false; + show variable max_partitions; NEW_CONNECTION; -set data_boost_enabled = false; +show variable max_partitions; NEW_CONNECTION; -set data_boost_enabled = false ; +show variable max_partitions ; NEW_CONNECTION; -set data_boost_enabled = false ; +show variable max_partitions ; NEW_CONNECTION; -set data_boost_enabled = false +show variable max_partitions ; NEW_CONNECTION; -set data_boost_enabled = false; -NEW_CONNECTION; -set data_boost_enabled = false; -NEW_CONNECTION; -set -data_boost_enabled -= -false; +show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -foo set data_boost_enabled = false; +show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false bar; +show 
+variable +max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set data_boost_enabled = false; +foo show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false%; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =%false; +%show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -_set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions%; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false_; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =_false; +_show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -&set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions_; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false&; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =&false; +&show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -$set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions&; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false$; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =$false; +$show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -@set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions$; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false@; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$max_partitions; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =@false; +@show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -!set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions@; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false!; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =!false; +!show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -*set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions!; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false*; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =*false; +*show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -(set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions*; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false(; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =(false; +(show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -)set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions(; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false); +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =)false; +)show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT --set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions); NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false-; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show variable)max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =-false; +-show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -+set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions-; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false+; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =+false; ++show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT --#set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions+; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false-#; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =-#false; +-#show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -/set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions-#; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false/; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =/false; +/show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -\set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions/; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false\; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =\false; +\show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -?set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
max_partitions\; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false?; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =?false; +?show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT --/set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions?; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false-/; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =-/false; +-/show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -/#set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions-/; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false/#; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =/#false; +/#show variable max_partitions; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -/-set data_boost_enabled = false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions/#; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled = false/-; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#max_partitions; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set data_boost_enabled =/-false; +/-show variable max_partitions; NEW_CONNECTION; -show variable auto_partition_mode; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitions/-; NEW_CONNECTION; -SHOW VARIABLE AUTO_PARTITION_MODE; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-max_partitions; NEW_CONNECTION; -show variable auto_partition_mode; +set max_partitions = 0; NEW_CONNECTION; - show variable auto_partition_mode; +SET MAX_PARTITIONS = 0; NEW_CONNECTION; - show variable auto_partition_mode; +set 
max_partitions = 0; +NEW_CONNECTION; + set max_partitions = 0; +NEW_CONNECTION; + set max_partitions = 0; NEW_CONNECTION; -show variable auto_partition_mode; +set max_partitions = 0; NEW_CONNECTION; -show variable auto_partition_mode ; +set max_partitions = 0 ; NEW_CONNECTION; -show variable auto_partition_mode ; +set max_partitions = 0 ; NEW_CONNECTION; -show variable auto_partition_mode +set max_partitions = 0 ; NEW_CONNECTION; -show variable auto_partition_mode; +set max_partitions = 0; NEW_CONNECTION; -show variable auto_partition_mode; +set max_partitions = 0; NEW_CONNECTION; -show -variable -auto_partition_mode; +set +max_partitions += +0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo show variable auto_partition_mode; +foo set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode bar; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0 bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%show variable auto_partition_mode; +%set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode%; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0%; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable%auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =%0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_show variable auto_partition_mode; +_set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode_; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0_; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable_auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =_0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&show variable auto_partition_mode; +&set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode&; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions 
= 0&; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable&auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =&0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$show variable auto_partition_mode; +$set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode$; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0$; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable$auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =$0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@show variable auto_partition_mode; +@set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode@; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0@; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable@auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =@0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!show variable auto_partition_mode; +!set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode!; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0!; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable!auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =!0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*show variable auto_partition_mode; +*set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode*; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0*; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable*auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =*0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(show variable auto_partition_mode; +(set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode(; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set max_partitions = 0(; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable(auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =(0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)show variable auto_partition_mode; +)set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode); +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0); NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable)auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =)0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --show variable auto_partition_mode; +-set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode-; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0-; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable-auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =-0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+show variable auto_partition_mode; ++set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode+; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0+; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable+auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =+0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#show variable auto_partition_mode; +-#set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode-#; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0-#; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable-#auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =-#0; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set max_partitions = 0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/show variable 
auto_partition_mode; -NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode/; +set max_partitions = 0/; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable/auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =/0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\show variable auto_partition_mode; +\set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode\; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0\; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable\auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =\0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?show variable auto_partition_mode; +?set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode?; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0?; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable?auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =?0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/show variable auto_partition_mode; +-/set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode-/; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0-/; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable-/auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =-/0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#show variable auto_partition_mode; +/#set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode/#; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0/#; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable/#auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =/#0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-show variable 
auto_partition_mode; +/-set max_partitions = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable auto_partition_mode/-; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions = 0/-; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable/-auto_partition_mode; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitions =/-0; NEW_CONNECTION; -set auto_partition_mode = true; +set max_partitions = 10; NEW_CONNECTION; -SET AUTO_PARTITION_MODE = TRUE; +SET MAX_PARTITIONS = 10; NEW_CONNECTION; -set auto_partition_mode = true; +set max_partitions = 10; NEW_CONNECTION; - set auto_partition_mode = true; + set max_partitions = 10; NEW_CONNECTION; - set auto_partition_mode = true; + set max_partitions = 10; NEW_CONNECTION; -set auto_partition_mode = true; +set max_partitions = 10; NEW_CONNECTION; -set auto_partition_mode = true ; +set max_partitions = 10 ; NEW_CONNECTION; -set auto_partition_mode = true ; +set max_partitions = 10 ; NEW_CONNECTION; -set auto_partition_mode = true +set max_partitions = 10 ; NEW_CONNECTION; -set auto_partition_mode = true; +set max_partitions = 10; NEW_CONNECTION; -set auto_partition_mode = true; +set max_partitions = 10; NEW_CONNECTION; set -auto_partition_mode +max_partitions = -true; +10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set auto_partition_mode = true; +foo set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true bar; +set max_partitions = 10 bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set auto_partition_mode = true; +%set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true%; +set max_partitions = 10%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =%true; +set max_partitions =%10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set auto_partition_mode = true; +_set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
auto_partition_mode = true_; +set max_partitions = 10_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =_true; +set max_partitions =_10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set auto_partition_mode = true; +&set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true&; +set max_partitions = 10&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =&true; +set max_partitions =&10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set auto_partition_mode = true; +$set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true$; +set max_partitions = 10$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =$true; +set max_partitions =$10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set auto_partition_mode = true; +@set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true@; +set max_partitions = 10@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =@true; +set max_partitions =@10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set auto_partition_mode = true; +!set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true!; +set max_partitions = 10!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =!true; +set max_partitions =!10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set auto_partition_mode = true; +*set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true*; +set max_partitions = 10*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =*true; +set max_partitions =*10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set auto_partition_mode = true; +(set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set auto_partition_mode = true(; +set max_partitions = 10(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =(true; +set max_partitions =(10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set auto_partition_mode = true; +)set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true); +set max_partitions = 10); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =)true; +set max_partitions =)10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set auto_partition_mode = true; +-set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true-; +set max_partitions = 10-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =-true; +set max_partitions =-10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set auto_partition_mode = true; ++set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true+; +set max_partitions = 10+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =+true; +set max_partitions =+10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set auto_partition_mode = true; +-#set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true-#; +set max_partitions = 10-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =-#true; +set max_partitions =-#10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set auto_partition_mode = true; +/set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true/; +set max_partitions = 10/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =/true; +set max_partitions =/10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set auto_partition_mode = true; +\set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set auto_partition_mode = true\; +set max_partitions = 10\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =\true; +set max_partitions =\10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set auto_partition_mode = true; +?set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true?; +set max_partitions = 10?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =?true; +set max_partitions =?10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set auto_partition_mode = true; +-/set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true-/; +set max_partitions = 10-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =-/true; +set max_partitions =-/10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set auto_partition_mode = true; +/#set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true/#; +set max_partitions = 10/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =/#true; +set max_partitions =/#10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set auto_partition_mode = true; +/-set max_partitions = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = true/-; +set max_partitions = 10/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =/-true; +set max_partitions =/-10; NEW_CONNECTION; -set auto_partition_mode = false; +show variable max_partitioned_parallelism; NEW_CONNECTION; -SET AUTO_PARTITION_MODE = FALSE; +SHOW VARIABLE MAX_PARTITIONED_PARALLELISM; NEW_CONNECTION; -set auto_partition_mode = false; +show variable max_partitioned_parallelism; NEW_CONNECTION; - set auto_partition_mode = false; + show variable max_partitioned_parallelism; NEW_CONNECTION; - set auto_partition_mode = false; + show variable 
max_partitioned_parallelism; NEW_CONNECTION; -set auto_partition_mode = false; +show variable max_partitioned_parallelism; NEW_CONNECTION; -set auto_partition_mode = false ; +show variable max_partitioned_parallelism ; NEW_CONNECTION; -set auto_partition_mode = false ; +show variable max_partitioned_parallelism ; NEW_CONNECTION; -set auto_partition_mode = false +show variable max_partitioned_parallelism ; NEW_CONNECTION; -set auto_partition_mode = false; +show variable max_partitioned_parallelism; NEW_CONNECTION; -set auto_partition_mode = false; +show variable max_partitioned_parallelism; NEW_CONNECTION; -set -auto_partition_mode -= -false; +show +variable +max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set auto_partition_mode = false; +foo show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false bar; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set auto_partition_mode = false; +%show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false%; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism%; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =%false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set auto_partition_mode = false; +_show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false_; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism_; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =_false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set auto_partition_mode 
= false; +&show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false&; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism&; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =&false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set auto_partition_mode = false; +$show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false$; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism$; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =$false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set auto_partition_mode = false; +@show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false@; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism@; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =@false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set auto_partition_mode = false; +!show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false!; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism!; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =!false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set auto_partition_mode = false; +*show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false*; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable max_partitioned_parallelism*; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =*false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set auto_partition_mode = false; +(show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false(; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism(; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =(false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set auto_partition_mode = false; +)show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false); +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism); NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =)false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set auto_partition_mode = false; +-show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false-; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism-; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =-false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set auto_partition_mode = false; ++show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false+; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism+; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =+false; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable+max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set auto_partition_mode = false; +-#show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false-#; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism-#; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =-#false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set auto_partition_mode = false; +/show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false/; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism/; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =/false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set auto_partition_mode = false; +\show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false\; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism\; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =\false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set auto_partition_mode = false; +?show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false?; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism?; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =?false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set auto_partition_mode = false; +-/show 
variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false-/; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism-/; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =-/false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set auto_partition_mode = false; +/#show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false/#; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism/#; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =/#false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#max_partitioned_parallelism; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set auto_partition_mode = false; +/-show variable max_partitioned_parallelism; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode = false/-; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable max_partitioned_parallelism/-; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set auto_partition_mode =/-false; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-max_partitioned_parallelism; NEW_CONNECTION; -show variable max_partitions; +set max_partitioned_parallelism = 0; NEW_CONNECTION; -SHOW VARIABLE MAX_PARTITIONS; +SET MAX_PARTITIONED_PARALLELISM = 0; NEW_CONNECTION; -show variable max_partitions; +set max_partitioned_parallelism = 0; NEW_CONNECTION; - show variable max_partitions; + set max_partitioned_parallelism = 0; NEW_CONNECTION; - show variable max_partitions; + set max_partitioned_parallelism = 0; NEW_CONNECTION; -show variable max_partitions; +set max_partitioned_parallelism = 0; NEW_CONNECTION; -show variable max_partitions ; +set max_partitioned_parallelism = 0 ; NEW_CONNECTION; -show variable max_partitions ; +set max_partitioned_parallelism = 0 ; 
NEW_CONNECTION; -show variable max_partitions +set max_partitioned_parallelism = 0 ; NEW_CONNECTION; -show variable max_partitions; +set max_partitioned_parallelism = 0; NEW_CONNECTION; -show variable max_partitions; +set max_partitioned_parallelism = 0; NEW_CONNECTION; -show -variable -max_partitions; +set +max_partitioned_parallelism += +0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo show variable max_partitions; +foo set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions bar; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0 bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%show variable max_partitions; +%set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions%; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0%; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable%max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =%0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_show variable max_partitions; +_set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions_; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0_; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable_max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =_0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&show variable max_partitions; +&set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions&; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0&; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable&max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =&0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$show variable 
max_partitions; +$set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions$; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0$; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable$max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =$0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@show variable max_partitions; +@set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions@; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0@; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable@max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =@0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!show variable max_partitions; +!set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions!; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0!; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable!max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =!0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*show variable max_partitions; +*set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions*; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0*; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable*max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =*0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(show variable max_partitions; +(set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions(; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0(; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show 
variable(max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =(0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)show variable max_partitions; +)set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions); +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0); NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable)max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =)0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --show variable max_partitions; +-set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions-; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0-; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable-max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =-0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+show variable max_partitions; ++set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions+; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0+; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable+max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =+0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#show variable max_partitions; +-#set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions-#; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0-#; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable-#max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =-#0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/show variable max_partitions; +/set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION 
UNIMPLEMENTED -show variable max_partitions/; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0/; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable/max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =/0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\show variable max_partitions; +\set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions\; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0\; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable\max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =\0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?show variable max_partitions; +?set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions?; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0?; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable?max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =?0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/show variable max_partitions; +-/set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions-/; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0-/; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable-/max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =-/0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#show variable max_partitions; +/#set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions/#; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0/#; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable/#max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
max_partitioned_parallelism =/#0; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-show variable max_partitions; +/-set max_partitioned_parallelism = 0; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitions/-; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism = 0/-; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable/-max_partitions; +@EXPECT EXCEPTION INVALID_ARGUMENT +set max_partitioned_parallelism =/-0; NEW_CONNECTION; -set max_partitions = 0; +set max_partitioned_parallelism = 10; NEW_CONNECTION; -SET MAX_PARTITIONS = 0; +SET MAX_PARTITIONED_PARALLELISM = 10; NEW_CONNECTION; -set max_partitions = 0; +set max_partitioned_parallelism = 10; NEW_CONNECTION; - set max_partitions = 0; + set max_partitioned_parallelism = 10; NEW_CONNECTION; - set max_partitions = 0; + set max_partitioned_parallelism = 10; NEW_CONNECTION; -set max_partitions = 0; +set max_partitioned_parallelism = 10; NEW_CONNECTION; -set max_partitions = 0 ; +set max_partitioned_parallelism = 10 ; NEW_CONNECTION; -set max_partitions = 0 ; +set max_partitioned_parallelism = 10 ; NEW_CONNECTION; -set max_partitions = 0 +set max_partitioned_parallelism = 10 ; NEW_CONNECTION; -set max_partitions = 0; +set max_partitioned_parallelism = 10; NEW_CONNECTION; -set max_partitions = 0; +set max_partitioned_parallelism = 10; NEW_CONNECTION; set -max_partitions +max_partitioned_parallelism = -0; +10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set max_partitions = 0; +foo set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0 bar; +set max_partitioned_parallelism = 10 bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set max_partitions = 0; +%set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0%; +set max_partitioned_parallelism = 10%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =%0; +set 
max_partitioned_parallelism =%10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set max_partitions = 0; +_set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0_; +set max_partitioned_parallelism = 10_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =_0; +set max_partitioned_parallelism =_10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set max_partitions = 0; +&set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0&; +set max_partitioned_parallelism = 10&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =&0; +set max_partitioned_parallelism =&10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set max_partitions = 0; +$set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0$; +set max_partitioned_parallelism = 10$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =$0; +set max_partitioned_parallelism =$10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set max_partitions = 0; +@set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0@; +set max_partitioned_parallelism = 10@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =@0; +set max_partitioned_parallelism =@10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set max_partitions = 0; +!set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0!; +set max_partitioned_parallelism = 10!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =!0; +set max_partitioned_parallelism =!10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set max_partitions = 0; +*set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0*; +set 
max_partitioned_parallelism = 10*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =*0; +set max_partitioned_parallelism =*10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set max_partitions = 0; +(set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0(; +set max_partitioned_parallelism = 10(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =(0; +set max_partitioned_parallelism =(10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set max_partitions = 0; +)set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0); +set max_partitioned_parallelism = 10); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =)0; +set max_partitioned_parallelism =)10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set max_partitions = 0; +-set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0-; +set max_partitioned_parallelism = 10-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =-0; +set max_partitioned_parallelism =-10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set max_partitions = 0; ++set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0+; +set max_partitioned_parallelism = 10+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =+0; +set max_partitioned_parallelism =+10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set max_partitions = 0; +-#set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0-#; +set max_partitioned_parallelism = 10-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =-#0; +set max_partitioned_parallelism =-#10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set max_partitions = 0; +/set 
max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0/; +set max_partitioned_parallelism = 10/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =/0; +set max_partitioned_parallelism =/10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set max_partitions = 0; +\set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0\; +set max_partitioned_parallelism = 10\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =\0; +set max_partitioned_parallelism =\10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set max_partitions = 0; +?set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0?; +set max_partitioned_parallelism = 10?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =?0; +set max_partitioned_parallelism =?10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set max_partitions = 0; +-/set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0-/; +set max_partitioned_parallelism = 10-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =-/0; +set max_partitioned_parallelism =-/10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set max_partitions = 0; +/#set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0/#; +set max_partitioned_parallelism = 10/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =/#0; +set max_partitioned_parallelism =/#10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set max_partitions = 0; +/-set max_partitioned_parallelism = 10; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 0/-; +set max_partitioned_parallelism = 10/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =/-0; +set 
max_partitioned_parallelism =/-10; NEW_CONNECTION; -set max_partitions = 10; +set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; -SET MAX_PARTITIONS = 10; +SET PROTO_DESCRIPTORS='PROTODESCRIPTORSBASE64'; NEW_CONNECTION; -set max_partitions = 10; +set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; - set max_partitions = 10; + set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; - set max_partitions = 10; + set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; -set max_partitions = 10; +set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; -set max_partitions = 10 ; +set proto_descriptors='protodescriptorsbase64' ; NEW_CONNECTION; -set max_partitions = 10 ; +set proto_descriptors='protodescriptorsbase64' ; NEW_CONNECTION; -set max_partitions = 10 +set proto_descriptors='protodescriptorsbase64' ; NEW_CONNECTION; -set max_partitions = 10; +set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; -set max_partitions = 10; +set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; set -max_partitions -= -10; +proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set max_partitions = 10; +foo set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10 bar; +set proto_descriptors='protodescriptorsbase64' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set max_partitions = 10; +%set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10%; +set proto_descriptors='protodescriptorsbase64'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =%10; +set%proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set max_partitions = 10; +_set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10_; +set 
proto_descriptors='protodescriptorsbase64'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =_10; +set_proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set max_partitions = 10; +&set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10&; +set proto_descriptors='protodescriptorsbase64'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =&10; +set&proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set max_partitions = 10; +$set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10$; +set proto_descriptors='protodescriptorsbase64'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =$10; +set$proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set max_partitions = 10; +@set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10@; +set proto_descriptors='protodescriptorsbase64'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =@10; +set@proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set max_partitions = 10; +!set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10!; +set proto_descriptors='protodescriptorsbase64'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =!10; +set!proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set max_partitions = 10; +*set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10*; +set proto_descriptors='protodescriptorsbase64'*; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set max_partitions =*10; +set*proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set max_partitions = 10; +(set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10(; +set proto_descriptors='protodescriptorsbase64'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =(10; +set(proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set max_partitions = 10; +)set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10); +set proto_descriptors='protodescriptorsbase64'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =)10; +set)proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set max_partitions = 10; +-set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10-; +set proto_descriptors='protodescriptorsbase64'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =-10; +set-proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set max_partitions = 10; ++set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10+; +set proto_descriptors='protodescriptorsbase64'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =+10; +set+proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set max_partitions = 10; +-#set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10-#; +set proto_descriptors='protodescriptorsbase64'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =-#10; 
+set-#proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set max_partitions = 10; +/set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10/; +set proto_descriptors='protodescriptorsbase64'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =/10; +set/proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set max_partitions = 10; +\set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10\; +set proto_descriptors='protodescriptorsbase64'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =\10; +set\proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set max_partitions = 10; +?set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10?; +set proto_descriptors='protodescriptorsbase64'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =?10; +set?proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set max_partitions = 10; +-/set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10-/; +set proto_descriptors='protodescriptorsbase64'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =-/10; +set-/proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set max_partitions = 10; +/#set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10/#; +set proto_descriptors='protodescriptorsbase64'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =/#10; +set/#proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -/-set max_partitions = 10; +/-set proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions = 10/-; +set proto_descriptors='protodescriptorsbase64'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitions =/-10; +set/-proto_descriptors='protodescriptorsbase64'; NEW_CONNECTION; -show variable max_partitioned_parallelism; +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -SHOW VARIABLE MAX_PARTITIONED_PARALLELISM; +SET PROTO_DESCRIPTORS_FILE_PATH='SRC/TEST/RESOURCES/COM/GOOGLE/CLOUD/SPANNER/DESCRIPTORS.PB'; NEW_CONNECTION; -show variable max_partitioned_parallelism; +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; - show variable max_partitioned_parallelism; + set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; - show variable max_partitioned_parallelism; + set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -show variable max_partitioned_parallelism; +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -show variable max_partitioned_parallelism ; +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb' ; NEW_CONNECTION; -show variable max_partitioned_parallelism ; +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb' ; NEW_CONNECTION; -show variable max_partitioned_parallelism +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb' ; NEW_CONNECTION; -show variable max_partitioned_parallelism; +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -show variable max_partitioned_parallelism; +set 
proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -show -variable -max_partitioned_parallelism; +set +proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo show variable max_partitioned_parallelism; +foo set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism bar; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%show variable max_partitioned_parallelism; +%set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism%; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'%; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable%max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_show variable max_partitioned_parallelism; +_set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism_; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'_; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable_max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -&show variable max_partitioned_parallelism; +&set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism&; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'&; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable&max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$show variable max_partitioned_parallelism; +$set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism$; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'$; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable$max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@show variable max_partitioned_parallelism; +@set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism@; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'@; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable@max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!show variable max_partitioned_parallelism; +!set 
proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism!; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'!; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable!max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*show variable max_partitioned_parallelism; +*set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism*; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'*; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable*max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(show variable max_partitioned_parallelism; +(set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism(; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'(; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable(max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)show variable max_partitioned_parallelism; +)set 
proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism); +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'); NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable)max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --show variable max_partitioned_parallelism; +-set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism-; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'-; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable-max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+show variable max_partitioned_parallelism; ++set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism+; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'+; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable+max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#show variable max_partitioned_parallelism; +-#set 
proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism-#; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'-#; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable-#max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/show variable max_partitioned_parallelism; +/set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism/; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'/; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable/max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\show variable max_partitioned_parallelism; +\set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism\; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'\; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable\max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?show variable max_partitioned_parallelism; +?set 
proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism?; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'?; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable?max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/show variable max_partitioned_parallelism; +-/set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism-/; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'-/; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable-/max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#show variable max_partitioned_parallelism; +/#set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism/#; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'/#; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable/#max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-show variable max_partitioned_parallelism; +/-set 
proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable max_partitioned_parallelism/-; +@EXPECT EXCEPTION INVALID_ARGUMENT +set proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'/-; NEW_CONNECTION; -@EXPECT EXCEPTION UNIMPLEMENTED -show variable/-max_partitioned_parallelism; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-proto_descriptors_file_path='src/test/resources/com/google/cloud/spanner/descriptors.pb'; NEW_CONNECTION; -set max_partitioned_parallelism = 0; +show variable proto_descriptors; NEW_CONNECTION; -SET MAX_PARTITIONED_PARALLELISM = 0; +SHOW VARIABLE PROTO_DESCRIPTORS; NEW_CONNECTION; -set max_partitioned_parallelism = 0; +show variable proto_descriptors; NEW_CONNECTION; - set max_partitioned_parallelism = 0; + show variable proto_descriptors; NEW_CONNECTION; - set max_partitioned_parallelism = 0; + show variable proto_descriptors; NEW_CONNECTION; -set max_partitioned_parallelism = 0; +show variable proto_descriptors; NEW_CONNECTION; -set max_partitioned_parallelism = 0 ; +show variable proto_descriptors ; NEW_CONNECTION; -set max_partitioned_parallelism = 0 ; +show variable proto_descriptors ; NEW_CONNECTION; -set max_partitioned_parallelism = 0 +show variable proto_descriptors ; NEW_CONNECTION; -set max_partitioned_parallelism = 0; -NEW_CONNECTION; -set max_partitioned_parallelism = 0; -NEW_CONNECTION; -set -max_partitioned_parallelism -= -0; -NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -foo set max_partitioned_parallelism = 0; +show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0 bar; +show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -%set max_partitioned_parallelism = 0; +show +variable +proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0%; +foo show 
variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =%0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set max_partitioned_parallelism = 0; +%show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0_; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors%; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =_0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set max_partitioned_parallelism = 0; +_show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0&; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =&0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set max_partitioned_parallelism = 0; +&show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0$; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors&; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =$0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set max_partitioned_parallelism = 0; +$show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0@; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors$; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =@0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -!set max_partitioned_parallelism = 0; +@show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0!; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors@; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =!0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set max_partitioned_parallelism = 0; +!show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0*; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors!; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =*0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set max_partitioned_parallelism = 0; +*show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0(; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors*; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =(0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set max_partitioned_parallelism = 0; +(show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0); +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors(; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =)0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set max_partitioned_parallelism = 0; +)show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0-; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
proto_descriptors); NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =-0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set max_partitioned_parallelism = 0; +-show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0+; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors-; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =+0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set max_partitioned_parallelism = 0; ++show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0-#; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors+; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =-#0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set max_partitioned_parallelism = 0; +-#show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0/; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors-#; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =/0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set max_partitioned_parallelism = 0; +/show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0\; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors/; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =\0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-?set max_partitioned_parallelism = 0; +\show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0?; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors\; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =?0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set max_partitioned_parallelism = 0; +?show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0-/; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors?; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =-/0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set max_partitioned_parallelism = 0; +-/show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0/#; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors-/; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =/#0; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set max_partitioned_parallelism = 0; +/#show variable proto_descriptors; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 0/-; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#proto_descriptors; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =/-0; +/-show variable proto_descriptors; NEW_CONNECTION; -set max_partitioned_parallelism = 10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors/-; NEW_CONNECTION; -SET MAX_PARTITIONED_PARALLELISM = 10; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable/-proto_descriptors; NEW_CONNECTION; -set max_partitioned_parallelism = 10; +show variable proto_descriptors_file_path; NEW_CONNECTION; - set max_partitioned_parallelism = 10; +SHOW VARIABLE PROTO_DESCRIPTORS_FILE_PATH; NEW_CONNECTION; - set max_partitioned_parallelism = 10; +show variable proto_descriptors_file_path; +NEW_CONNECTION; + show variable proto_descriptors_file_path; +NEW_CONNECTION; + show variable proto_descriptors_file_path; NEW_CONNECTION; -set max_partitioned_parallelism = 10; +show variable proto_descriptors_file_path; NEW_CONNECTION; -set max_partitioned_parallelism = 10 ; +show variable proto_descriptors_file_path ; NEW_CONNECTION; -set max_partitioned_parallelism = 10 ; +show variable proto_descriptors_file_path ; NEW_CONNECTION; -set max_partitioned_parallelism = 10 +show variable proto_descriptors_file_path ; NEW_CONNECTION; -set max_partitioned_parallelism = 10; +show variable proto_descriptors_file_path; NEW_CONNECTION; -set max_partitioned_parallelism = 10; +show variable proto_descriptors_file_path; NEW_CONNECTION; -set -max_partitioned_parallelism -= -10; +show +variable +proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set max_partitioned_parallelism = 10; +foo show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10 bar; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set max_partitioned_parallelism = 10; +%show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10%; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path%; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =%10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-_set max_partitioned_parallelism = 10; +_show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10_; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path_; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =_10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set max_partitioned_parallelism = 10; +&show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10&; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path&; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =&10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set max_partitioned_parallelism = 10; +$show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10$; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path$; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =$10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set max_partitioned_parallelism = 10; +@show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10@; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path@; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =@10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set max_partitioned_parallelism = 10; +!show variable proto_descriptors_file_path; NEW_CONNECTION; 
-@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10!; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path!; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =!10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set max_partitioned_parallelism = 10; +*show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10*; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path*; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =*10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set max_partitioned_parallelism = 10; +(show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10(; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path(; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =(10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set max_partitioned_parallelism = 10; +)show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10); +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path); NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =)10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set max_partitioned_parallelism = 10; +-show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10-; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable proto_descriptors_file_path-; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =-10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set max_partitioned_parallelism = 10; ++show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10+; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path+; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =+10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set max_partitioned_parallelism = 10; +-#show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10-#; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path-#; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =-#10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set max_partitioned_parallelism = 10; +/show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10/; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path/; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =/10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set max_partitioned_parallelism = 10; +\show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10\; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path\; NEW_CONNECTION; -@EXPECT EXCEPTION 
INVALID_ARGUMENT -set max_partitioned_parallelism =\10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set max_partitioned_parallelism = 10; +?show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10?; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path?; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =?10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set max_partitioned_parallelism = 10; +-/show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10-/; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path-/; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =-/10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set max_partitioned_parallelism = 10; +/#show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10/#; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path/#; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =/#10; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#proto_descriptors_file_path; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set max_partitioned_parallelism = 10; +/-show variable proto_descriptors_file_path; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism = 10/-; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable proto_descriptors_file_path/-; NEW_CONNECTION; -@EXPECT EXCEPTION INVALID_ARGUMENT -set max_partitioned_parallelism =/-10; +@EXPECT EXCEPTION 
UNIMPLEMENTED +show variable/-proto_descriptors_file_path; diff --git a/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ConnectionImplGeneratedSqlScriptTest.sql b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ConnectionImplGeneratedSqlScriptTest.sql index 84275c3d5c3..3a1aa48ef7b 100644 --- a/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ConnectionImplGeneratedSqlScriptTest.sql +++ b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ConnectionImplGeneratedSqlScriptTest.sql @@ -160,15 +160,15 @@ NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; COMMIT; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:32.894000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:32.894000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:28.272000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:28.272000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; COMMIT; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:32.894000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:28.272000000Z'; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -510,15 +510,15 @@ NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.008000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.008000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:28.411000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:28.411000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; @EXPECT EXCEPTION FAILED_PRECONDITION -SET 
READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.008000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:28.411000000Z'; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -950,8 +950,8 @@ BEGIN TRANSACTION; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; ROLLBACK; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.111000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.111000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:28.520000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:28.520000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; @@ -961,7 +961,7 @@ BEGIN TRANSACTION; SELECT 1 AS TEST; ROLLBACK; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.111000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:28.520000000Z'; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -1462,8 +1462,8 @@ BEGIN TRANSACTION; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; COMMIT; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.217000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.217000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:28.642000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:28.642000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; @@ -1473,7 +1473,7 @@ BEGIN TRANSACTION; SELECT 1 AS TEST; COMMIT; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.217000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:28.642000000Z'; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -1876,15 +1876,15 @@ NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; BEGIN TRANSACTION; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 
2024-04-22T15:43:33.318000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.318000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:28.738000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:28.738000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; BEGIN TRANSACTION; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.318000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:28.738000000Z'; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -2243,14 +2243,14 @@ SET AUTOCOMMIT=FALSE; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.388000000Z'; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:28.817000000Z'; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.388000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:28.817000000Z'; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -2600,13 +2600,13 @@ SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.472000000Z'; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:28.905000000Z'; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.472000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:28.905000000Z'; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -2910,14 +2910,14 @@ SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; -SET 
READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.543000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.543000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:28.987000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:28.987000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.543000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:28.987000000Z'; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -3245,15 +3245,15 @@ NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; COMMIT; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.630000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.630000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.073000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.073000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; COMMIT; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.630000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.073000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -3662,8 +3662,8 @@ SET AUTOCOMMIT=FALSE; START BATCH DDL; CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); RUN BATCH; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.713000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.713000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.148000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.148000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; @@ -3672,7 +3672,7 
@@ START BATCH DDL; CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); RUN BATCH; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.713000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.148000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -4081,14 +4081,14 @@ SET AUTOCOMMIT=FALSE; START BATCH DDL; CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.778000000Z'; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.225000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; START BATCH DDL; CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.778000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.225000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -4438,13 +4438,13 @@ SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; START BATCH DDL; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.833000000Z'; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.292000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; START BATCH DDL; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.833000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.292000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -4877,8 +4877,8 @@ SET TRANSACTION READ ONLY; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; COMMIT; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.892000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.892000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 
2024-09-09T09:13:29.359000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.359000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; @@ -4888,7 +4888,7 @@ SET TRANSACTION READ ONLY; SELECT 1 AS TEST; COMMIT; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.892000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.359000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -5288,15 +5288,15 @@ NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; SET TRANSACTION READ ONLY; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.959000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.959000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.436000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.436000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; SET TRANSACTION READ ONLY; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.959000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.436000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -5641,15 +5641,15 @@ NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.012000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.012000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.502000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.502000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; SET READ_ONLY_STALENESS='EXACT_STALENESS 10s'; @EXPECT EXCEPTION FAILED_PRECONDITION -SET 
READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.012000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.502000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -6088,8 +6088,8 @@ BEGIN TRANSACTION; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; ROLLBACK; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.073000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.073000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.570000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.570000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; @@ -6099,7 +6099,7 @@ BEGIN TRANSACTION; SELECT 1 AS TEST; ROLLBACK; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.073000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.570000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -6607,8 +6607,8 @@ BEGIN TRANSACTION; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; COMMIT; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.152000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.152000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.663000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.663000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; @@ -6618,7 +6618,7 @@ BEGIN TRANSACTION; SELECT 1 AS TEST; COMMIT; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.152000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.663000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -7023,15 +7023,15 @@ NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; BEGIN TRANSACTION; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 
2024-04-22T15:43:34.223000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.223000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.740000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.740000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; BEGIN TRANSACTION; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.223000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.740000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -7394,14 +7394,14 @@ SET AUTOCOMMIT=FALSE; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.285000000Z'; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.795000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.285000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.795000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -7756,13 +7756,13 @@ SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.350000000Z'; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.862000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.350000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.862000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -8075,14 +8075,14 @@ SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; 
-SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.415000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.415000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.922000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.922000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.415000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.922000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -8392,13 +8392,13 @@ SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; START BATCH DDL; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.468000000Z'; +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.975000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; START BATCH DDL; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.468000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.975000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; @@ -8753,8 +8753,8 @@ SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; SET TRANSACTION READ ONLY; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.517000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.517000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.028000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.028000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; @@ -8762,7 +8762,7 @@ SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; SET TRANSACTION READ ONLY; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.517000000Z'; +SET 
READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.028000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; @@ -9197,8 +9197,8 @@ SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; UPDATE foo SET bar=1; COMMIT; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.575000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.575000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.084000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.084000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; @@ -9206,8 +9206,8 @@ SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; UPDATE foo SET bar=1; COMMIT; -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.575000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:34.575000000Z' +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.084000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.084000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; @@ -9593,15 +9593,15 @@ NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.641000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.641000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.150000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.150000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.641000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.150000000Z'; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; @@ -9952,15 +9952,15 @@ NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; 
CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.691000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.691000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.199000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.199000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.691000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:34.691000000Z' +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.199000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.199000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; @@ -10320,15 +10320,15 @@ NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; UPDATE foo SET bar=1; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.768000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.768000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.257000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.257000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; UPDATE foo SET bar=1; -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.768000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:34.768000000Z' +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.257000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.257000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; @@ -10718,16 +10718,16 @@ SET READONLY=FALSE; SET 
AUTOCOMMIT=TRUE; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.829000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.829000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.314000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.314000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.829000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:34.829000000Z' +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.314000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.314000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; @@ -11110,15 +11110,15 @@ NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; SELECT 1 AS TEST; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.886000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.886000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.371000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.371000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; SELECT 1 AS TEST; -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.886000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:34.886000000Z' +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.371000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.371000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; @@ -11448,14 +11448,14 @@ SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; SET 
AUTOCOMMIT=TRUE; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.946000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.946000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.426000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.426000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; SET AUTOCOMMIT=TRUE; -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.946000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:34.946000000Z' +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.426000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.426000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=FALSE; @@ -11778,15 +11778,15 @@ NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=TRUE; SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.999000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.999000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.477000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.477000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=TRUE; SET READ_ONLY_STALENESS='MAX_STALENESS 10s'; -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.999000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:34.999000000Z' +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.477000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.477000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; @@ -12193,8 +12193,8 @@ SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; SELECT 1 AS TEST; COMMIT; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 
2024-04-22T15:43:35.050000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:35.050000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.528000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.528000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; @@ -12202,8 +12202,8 @@ SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; SELECT 1 AS TEST; COMMIT; -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:35.050000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:35.050000000Z' +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.528000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.528000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; @@ -12586,15 +12586,15 @@ NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:35.106000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:35.106000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.584000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.584000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; @EXPECT EXCEPTION FAILED_PRECONDITION -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:35.106000000Z'; +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.584000000Z'; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=TRUE; @@ -12932,15 +12932,15 @@ NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=TRUE; SELECT 1 AS TEST; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:35.157000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:35.157000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.632000000Z'; +@EXPECT 
RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.632000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=TRUE; SELECT 1 AS TEST; -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:35.157000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:35.157000000Z' +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.632000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.632000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; @@ -13287,15 +13287,15 @@ NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=TRUE; SELECT 1 AS TEST; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:35.213000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:35.213000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.683000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.683000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=TRUE; SELECT 1 AS TEST; -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:35.213000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:35.213000000Z' +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.683000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.683000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; @@ -13612,14 +13612,14 @@ SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=TRUE; -SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:35.269000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:35.269000000Z' +SET READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.733000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.733000000Z' 
SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; SET AUTOCOMMIT=TRUE; -SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:35.269000000Z'; -@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:35.269000000Z' +SET READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.733000000Z'; +@EXPECT RESULT_SET 'READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.733000000Z' SHOW VARIABLE READ_ONLY_STALENESS; NEW_CONNECTION; SET READONLY=TRUE; diff --git a/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITDdlTest.sql b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITDdlTest.sql index 2dea0423151..2efc59ed36e 100644 --- a/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITDdlTest.sql +++ b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/ITDdlTest.sql @@ -127,7 +127,7 @@ WHERE TABLE_NAME='VALID_MULTIPLE_DDL_IN_DDL_BATCH_1' OR TABLE_NAME='VALID_MULTIP NEW_CONNECTION; -/* +/* * Do a test that shows that a DDL batch might only execute some of the statements, * for example if data in a table prevents a unique index from being created. */ @@ -187,3 +187,44 @@ RUN BATCH; START BATCH DDL; ABORT BATCH; + +NEW_CONNECTION; +-- Set proto descriptors using relative path to the descriptors.pb file. This gets applied for next DDL statement +SET PROTO_DESCRIPTORS_FILE_PATH = 'src/test/resources/com/google/cloud/spanner/descriptors.pb'; +-- Check if Proto descriptors is set +@EXPECT RESULT_SET 'PROTO_DESCRIPTORS_FILE_PATH' +SHOW VARIABLE PROTO_DESCRIPTORS_FILE_PATH; + +CREATE PROTO BUNDLE (examples.spanner.music.Genre); +-- Check if Proto descriptors is reset to null +@EXPECT RESULT_SET 'PROTO_DESCRIPTORS',null +SHOW VARIABLE PROTO_DESCRIPTORS; +@EXPECT RESULT_SET 'PROTO_DESCRIPTORS_FILE_PATH',null +SHOW VARIABLE PROTO_DESCRIPTORS_FILE_PATH; + +-- Set Proto Descriptor as base64 string. 
This gets applied to all statements in next DDL batch +SET PROTO_DESCRIPTORS = 'CvYCCgxzaW5nZXIucHJvdG8SFmV4YW1wbGVzLnNwYW5uZXIubXVzaWMi6gEKClNpbmdlckluZm8SIAoJc2luZ2VyX2lkGAEgASgDSABSCHNpbmdlcklkiAEBEiIKCmJpcnRoX2RhdGUYAiABKAlIAVIJYmlydGhEYXRliAEBEiUKC25hdGlvbmFsaXR5GAMgASgJSAJSC25hdGlvbmFsaXR5iAEBEjgKBWdlbnJlGAQgASgOMh0uZXhhbXBsZXMuc3Bhbm5lci5tdXNpYy5HZW5yZUgDUgVnZW5yZYgBAUIMCgpfc2luZ2VyX2lkQg0KC19iaXJ0aF9kYXRlQg4KDF9uYXRpb25hbGl0eUIICgZfZ2VucmUqLgoFR2VucmUSBwoDUE9QEAASCAoESkFaWhABEggKBEZPTEsQAhIICgRST0NLEANCKQoYY29tLmdvb2dsZS5jbG91ZC5zcGFubmVyQgtTaW5nZXJQcm90b1AAYgZwcm90bzM='; + +@EXPECT RESULT_SET 'PROTO_DESCRIPTORS' +SHOW VARIABLE PROTO_DESCRIPTORS; + +START BATCH DDL; +ALTER PROTO BUNDLE INSERT (examples.spanner.music.SingerInfo); +CREATE TABLE Singers ( + SingerId INT64 NOT NULL, + FirstName STRING(1024), + LastName STRING(1024), + SingerInfo examples.spanner.music.SingerInfo, + SingerGenre examples.spanner.music.Genre +) PRIMARY KEY (SingerId); +-- Run the batch +RUN BATCH; + +-- Check if Proto descriptors is reset to null +@EXPECT RESULT_SET 'PROTO_DESCRIPTORS',null +SHOW VARIABLE PROTO_DESCRIPTORS; +-- Check that the table is created +@EXPECT RESULT_SET +SELECT COUNT(*) AS ACTUAL, 1 AS EXPECTED +FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_NAME='Singers'; diff --git a/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/ClientSideStatementsTest.sql b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/ClientSideStatementsTest.sql index 5ffa722385d..7060684953b 100644 --- a/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/ClientSideStatementsTest.sql +++ b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/ClientSideStatementsTest.sql @@ -7167,6 +7167,403 @@ NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED show variable/-spanner.transaction_tag; NEW_CONNECTION; +show spanner.exclude_txn_from_change_streams; 
+NEW_CONNECTION; +SHOW SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS; +NEW_CONNECTION; +show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; + show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; + show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; + + + +show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +show spanner.exclude_txn_from_change_streams ; +NEW_CONNECTION; +show spanner.exclude_txn_from_change_streams ; +NEW_CONNECTION; +show spanner.exclude_txn_from_change_streams + +; +NEW_CONNECTION; +show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +show +spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show%spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show_spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show&spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
spanner.exclude_txn_from_change_streams$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show$spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show@spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show!spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show*spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show(spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show)spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show-spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show 
spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show+spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show-#spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show/spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show\spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show?spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show-/spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+show/#spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.exclude_txn_from_change_streams/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show/-spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS; +NEW_CONNECTION; +show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; + show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; + show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; + + + +show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +show variable spanner.exclude_txn_from_change_streams ; +NEW_CONNECTION; +show variable spanner.exclude_txn_from_change_streams ; +NEW_CONNECTION; +show variable spanner.exclude_txn_from_change_streams + +; +NEW_CONNECTION; +show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +show +variable +spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable 
spanner.exclude_txn_from_change_streams_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams(; +NEW_CONNECTION; 
+@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show 
variable\spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.exclude_txn_from_change_streams/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.exclude_txn_from_change_streams; +NEW_CONNECTION; show spanner.rpc_priority; NEW_CONNECTION; SHOW SPANNER.RPC_PRIORITY; @@ -8358,6 +8755,403 @@ NEW_CONNECTION; @EXPECT EXCEPTION UNIMPLEMENTED show variable/-spanner.delay_transaction_start_until_first_write; NEW_CONNECTION; +show spanner.keep_transaction_alive; +NEW_CONNECTION; +SHOW SPANNER.KEEP_TRANSACTION_ALIVE; +NEW_CONNECTION; +show spanner.keep_transaction_alive; +NEW_CONNECTION; + show spanner.keep_transaction_alive; +NEW_CONNECTION; + show spanner.keep_transaction_alive; +NEW_CONNECTION; + + + +show spanner.keep_transaction_alive; +NEW_CONNECTION; +show spanner.keep_transaction_alive ; +NEW_CONNECTION; +show 
spanner.keep_transaction_alive ; +NEW_CONNECTION; +show spanner.keep_transaction_alive + +; +NEW_CONNECTION; +show spanner.keep_transaction_alive; +NEW_CONNECTION; +show spanner.keep_transaction_alive; +NEW_CONNECTION; +show +spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show%spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show_spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show&spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show$spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show@spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive!; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show!spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show*spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show(spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show)spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show-spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show+spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show-#spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show/spanner.keep_transaction_alive; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show\spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show?spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show-/spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show/#spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show spanner.keep_transaction_alive/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +show/-spanner.keep_transaction_alive; +NEW_CONNECTION; +show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +SHOW VARIABLE SPANNER.KEEP_TRANSACTION_ALIVE; +NEW_CONNECTION; +show variable spanner.keep_transaction_alive; +NEW_CONNECTION; + show variable spanner.keep_transaction_alive; +NEW_CONNECTION; + show variable spanner.keep_transaction_alive; +NEW_CONNECTION; + + + +show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +show variable spanner.keep_transaction_alive ; +NEW_CONNECTION; +show variable spanner.keep_transaction_alive ; +NEW_CONNECTION; +show variable spanner.keep_transaction_alive + +; +NEW_CONNECTION; +show variable 
spanner.keep_transaction_alive; +NEW_CONNECTION; +show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +show +variable +spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive%; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable%spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive_; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable_spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive&; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable&spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive$; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable$spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive@; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable@spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED 
+show variable spanner.keep_transaction_alive!; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable!spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive*; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable*spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive(; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable(spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive); +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable)spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive+; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable+spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive-#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-#spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/show variable 
spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive\; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable\spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive?; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable?spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive-/; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable-/spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive/#; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/#spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-show variable spanner.keep_transaction_alive; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable spanner.keep_transaction_alive/-; +NEW_CONNECTION; +@EXPECT EXCEPTION UNIMPLEMENTED +show variable/-spanner.keep_transaction_alive; +NEW_CONNECTION; show transaction isolation level; NEW_CONNECTION; SHOW TRANSACTION ISOLATION LEVEL; @@ -44765,6 +45559,204 @@ start batch ddl; @EXPECT EXCEPTION INVALID_ARGUMENT abort/-batch; NEW_CONNECTION; +reset all; +NEW_CONNECTION; +RESET ALL; +NEW_CONNECTION; +reset all; 
+NEW_CONNECTION; + reset all; +NEW_CONNECTION; + reset all; +NEW_CONNECTION; + + + +reset all; +NEW_CONNECTION; +reset all ; +NEW_CONNECTION; +reset all ; +NEW_CONNECTION; +reset all + +; +NEW_CONNECTION; +reset all; +NEW_CONNECTION; +reset all; +NEW_CONNECTION; +reset +all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset%all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset_all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset&all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset$all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset@all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset!all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset*all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset(all; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset)all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset-all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset+all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset-#all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset/all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset\all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset?all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset-/all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset/#all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-reset all; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset all/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +reset/-all; +NEW_CONNECTION; 
set autocommit = true; NEW_CONNECTION; SET AUTOCOMMIT = TRUE; @@ -48703,6 +49695,207 @@ NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT set/-statement_timeout=default; NEW_CONNECTION; +set statement_timeout = default ; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT = DEFAULT ; +NEW_CONNECTION; +set statement_timeout = default ; +NEW_CONNECTION; + set statement_timeout = default ; +NEW_CONNECTION; + set statement_timeout = default ; +NEW_CONNECTION; + + + +set statement_timeout = default ; +NEW_CONNECTION; +set statement_timeout = default ; +NEW_CONNECTION; +set statement_timeout = default ; +NEW_CONNECTION; +set statement_timeout = default + +; +NEW_CONNECTION; +set statement_timeout = default ; +NEW_CONNECTION; +set statement_timeout = default ; +NEW_CONNECTION; +set +statement_timeout += +default +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default $; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set statement_timeout = default$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set 
statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout = default ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = default/-; +NEW_CONNECTION; set statement_timeout='1s'; NEW_CONNECTION; SET STATEMENT_TIMEOUT='1S'; @@ -48901,6 +50094,207 @@ NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
set/-statement_timeout='1s'; NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT = '1S' ; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; + set statement_timeout = '1s' ; +NEW_CONNECTION; + set statement_timeout = '1s' ; +NEW_CONNECTION; + + + +set statement_timeout = '1s' ; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; +set statement_timeout = '1s' + +; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; +set statement_timeout = '1s' ; +NEW_CONNECTION; +set +statement_timeout += +'1s' +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 
'1s' @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout = '1s' ; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout = '1s' ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s' /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = '1s'/-; +NEW_CONNECTION; set statement_timeout='100ms'; NEW_CONNECTION; SET STATEMENT_TIMEOUT='100MS'; @@ -49297,6 +50691,207 @@ NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT set/-statement_timeout=100; NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; +SET STATEMENT_TIMEOUT = 100 ; +NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; + set statement_timeout = 100 ; +NEW_CONNECTION; + set statement_timeout = 100 ; +NEW_CONNECTION; + + + +set statement_timeout = 100 ; +NEW_CONNECTION; +set statement_timeout = 100 ; 
+NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; +set statement_timeout = 100 + +; +NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; +set statement_timeout = 100 ; +NEW_CONNECTION; +set +statement_timeout += +100 +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 %; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 _; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 &; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 $; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 @; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 !; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+*set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 *; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 (; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 ); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 -; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 +; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 -#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 /; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 \; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100\; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +?set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 ?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 -/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 /#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set statement_timeout = 100 ; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100 /-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set statement_timeout = 100/-; +NEW_CONNECTION; set statement_timeout='10000us'; NEW_CONNECTION; SET STATEMENT_TIMEOUT='10000US'; @@ -66723,7651 +68318,10652 @@ NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT set/-spanner.max_commit_delay=null; NEW_CONNECTION; -set spanner.max_commit_delay='1s'; +set spanner.max_commit_delay = null; NEW_CONNECTION; -SET SPANNER.MAX_COMMIT_DELAY='1S'; +SET SPANNER.MAX_COMMIT_DELAY = NULL; NEW_CONNECTION; -set spanner.max_commit_delay='1s'; +set spanner.max_commit_delay = null; NEW_CONNECTION; - set spanner.max_commit_delay='1s'; + set spanner.max_commit_delay = null; NEW_CONNECTION; - set spanner.max_commit_delay='1s'; + set spanner.max_commit_delay = null; NEW_CONNECTION; -set spanner.max_commit_delay='1s'; +set spanner.max_commit_delay = null; NEW_CONNECTION; -set spanner.max_commit_delay='1s' ; +set spanner.max_commit_delay = null ; NEW_CONNECTION; -set spanner.max_commit_delay='1s' ; +set spanner.max_commit_delay = null ; NEW_CONNECTION; -set 
spanner.max_commit_delay='1s' +set spanner.max_commit_delay = null ; NEW_CONNECTION; -set spanner.max_commit_delay='1s'; +set spanner.max_commit_delay = null; NEW_CONNECTION; -set spanner.max_commit_delay='1s'; +set spanner.max_commit_delay = null; NEW_CONNECTION; set -spanner.max_commit_delay='1s'; +spanner.max_commit_delay += +null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.max_commit_delay='1s'; +foo set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s' bar; +set spanner.max_commit_delay = null bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.max_commit_delay='1s'; +%set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'%; +set spanner.max_commit_delay = null%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =%null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.max_commit_delay='1s'; +_set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'_; +set spanner.max_commit_delay = null_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =_null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.max_commit_delay='1s'; +&set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'&; +set spanner.max_commit_delay = null&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =&null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.max_commit_delay='1s'; +$set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'$; +set spanner.max_commit_delay = null$; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =$null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.max_commit_delay='1s'; +@set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'@; +set spanner.max_commit_delay = null@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =@null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.max_commit_delay='1s'; +!set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'!; +set spanner.max_commit_delay = null!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =!null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.max_commit_delay='1s'; +*set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'*; +set spanner.max_commit_delay = null*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =*null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.max_commit_delay='1s'; +(set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'(; +set spanner.max_commit_delay = null(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =(null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.max_commit_delay='1s'; +)set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'); +set spanner.max_commit_delay = null); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)spanner.max_commit_delay='1s'; +set 
spanner.max_commit_delay =)null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.max_commit_delay='1s'; +-set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'-; +set spanner.max_commit_delay = null-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =-null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.max_commit_delay='1s'; ++set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'+; +set spanner.max_commit_delay = null+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =+null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.max_commit_delay='1s'; +-#set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'-#; +set spanner.max_commit_delay = null-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =-#null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.max_commit_delay='1s'; +/set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'/; +set spanner.max_commit_delay = null/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =/null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.max_commit_delay='1s'; +\set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'\; +set spanner.max_commit_delay = null\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =\null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set 
spanner.max_commit_delay='1s'; +?set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'?; +set spanner.max_commit_delay = null?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =?null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.max_commit_delay='1s'; +-/set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'-/; +set spanner.max_commit_delay = null-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =-/null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.max_commit_delay='1s'; +/#set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'/#; +set spanner.max_commit_delay = null/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =/#null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.max_commit_delay='1s'; +/-set spanner.max_commit_delay = null; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='1s'/-; +set spanner.max_commit_delay = null/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-spanner.max_commit_delay='1s'; +set spanner.max_commit_delay =/-null; NEW_CONNECTION; -set spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null ; NEW_CONNECTION; -SET SPANNER.MAX_COMMIT_DELAY='100MS'; +SET SPANNER.MAX_COMMIT_DELAY = NULL ; NEW_CONNECTION; -set spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null ; NEW_CONNECTION; - set spanner.max_commit_delay='100ms'; + set spanner.max_commit_delay = null ; NEW_CONNECTION; - set spanner.max_commit_delay='100ms'; + set spanner.max_commit_delay = null ; NEW_CONNECTION; -set 
spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null ; NEW_CONNECTION; -set spanner.max_commit_delay='100ms' ; +set spanner.max_commit_delay = null ; NEW_CONNECTION; -set spanner.max_commit_delay='100ms' ; +set spanner.max_commit_delay = null ; NEW_CONNECTION; -set spanner.max_commit_delay='100ms' +set spanner.max_commit_delay = null ; NEW_CONNECTION; -set spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null ; NEW_CONNECTION; -set spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null ; NEW_CONNECTION; set -spanner.max_commit_delay='100ms'; +spanner.max_commit_delay += +null +; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.max_commit_delay='100ms'; +foo set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms' bar; +set spanner.max_commit_delay = null bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.max_commit_delay='100ms'; +%set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'%; +set spanner.max_commit_delay = null %; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.max_commit_delay='100ms'; +_set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'_; +set spanner.max_commit_delay = null _; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.max_commit_delay='100ms'; +&set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'&; +set spanner.max_commit_delay = null &; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set&spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.max_commit_delay='100ms'; +$set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'$; +set spanner.max_commit_delay = null $; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.max_commit_delay='100ms'; +@set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'@; +set spanner.max_commit_delay = null @; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.max_commit_delay='100ms'; +!set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'!; +set spanner.max_commit_delay = null !; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.max_commit_delay='100ms'; +*set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'*; +set spanner.max_commit_delay = null *; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.max_commit_delay='100ms'; +(set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'(; +set spanner.max_commit_delay = null (; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set(spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.max_commit_delay='100ms'; +)set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'); +set spanner.max_commit_delay = null ); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.max_commit_delay='100ms'; +-set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'-; +set spanner.max_commit_delay = null -; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.max_commit_delay='100ms'; ++set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'+; +set spanner.max_commit_delay = null +; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.max_commit_delay='100ms'; +-#set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'-#; +set spanner.max_commit_delay = null -#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.max_commit_delay='100ms'; +/set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'/; +set spanner.max_commit_delay = null /; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/spanner.max_commit_delay='100ms'; 
+set spanner.max_commit_delay = null/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.max_commit_delay='100ms'; +\set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'\; +set spanner.max_commit_delay = null \; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.max_commit_delay='100ms'; +?set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'?; +set spanner.max_commit_delay = null ?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.max_commit_delay='100ms'; +-/set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'-/; +set spanner.max_commit_delay = null -/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.max_commit_delay='100ms'; +/#set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'/#; +set spanner.max_commit_delay = null /#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#spanner.max_commit_delay='100ms'; +set spanner.max_commit_delay = null/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.max_commit_delay='100ms'; +/-set spanner.max_commit_delay = null ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay='100ms'/-; +set spanner.max_commit_delay = null /-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-spanner.max_commit_delay='100ms'; +set 
spanner.max_commit_delay = null/-; NEW_CONNECTION; -set spanner.max_commit_delay to '10000us'; +set spanner.max_commit_delay='1s'; NEW_CONNECTION; -SET SPANNER.MAX_COMMIT_DELAY TO '10000US'; +SET SPANNER.MAX_COMMIT_DELAY='1S'; NEW_CONNECTION; -set spanner.max_commit_delay to '10000us'; +set spanner.max_commit_delay='1s'; NEW_CONNECTION; - set spanner.max_commit_delay to '10000us'; + set spanner.max_commit_delay='1s'; NEW_CONNECTION; - set spanner.max_commit_delay to '10000us'; + set spanner.max_commit_delay='1s'; NEW_CONNECTION; -set spanner.max_commit_delay to '10000us'; +set spanner.max_commit_delay='1s'; NEW_CONNECTION; -set spanner.max_commit_delay to '10000us' ; +set spanner.max_commit_delay='1s' ; NEW_CONNECTION; -set spanner.max_commit_delay to '10000us' ; +set spanner.max_commit_delay='1s' ; NEW_CONNECTION; -set spanner.max_commit_delay to '10000us' +set spanner.max_commit_delay='1s' ; NEW_CONNECTION; -set spanner.max_commit_delay to '10000us'; +set spanner.max_commit_delay='1s'; NEW_CONNECTION; -set spanner.max_commit_delay to '10000us'; +set spanner.max_commit_delay='1s'; NEW_CONNECTION; set -spanner.max_commit_delay -to -'10000us'; +spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.max_commit_delay to '10000us'; +foo set spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us' bar; +set spanner.max_commit_delay='1s' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.max_commit_delay to '10000us'; +%set spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'%; +set spanner.max_commit_delay='1s'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to%'10000us'; +set%spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.max_commit_delay to '10000us'; +_set 
spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'_; +set spanner.max_commit_delay='1s'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to_'10000us'; +set_spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.max_commit_delay to '10000us'; +&set spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'&; +set spanner.max_commit_delay='1s'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to&'10000us'; +set&spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.max_commit_delay to '10000us'; +$set spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'$; +set spanner.max_commit_delay='1s'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to$'10000us'; +set$spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.max_commit_delay to '10000us'; +@set spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'@; +set spanner.max_commit_delay='1s'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to@'10000us'; +set@spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.max_commit_delay to '10000us'; +!set spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'!; +set spanner.max_commit_delay='1s'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to!'10000us'; +set!spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.max_commit_delay to '10000us'; +*set 
spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'*; +set spanner.max_commit_delay='1s'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to*'10000us'; +set*spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.max_commit_delay to '10000us'; +(set spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'(; +set spanner.max_commit_delay='1s'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to('10000us'; +set(spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.max_commit_delay to '10000us'; +)set spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'); +set spanner.max_commit_delay='1s'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to)'10000us'; +set)spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.max_commit_delay to '10000us'; +-set spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'-; +set spanner.max_commit_delay='1s'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to-'10000us'; +set-spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.max_commit_delay to '10000us'; ++set spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'+; +set spanner.max_commit_delay='1s'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to+'10000us'; +set+spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.max_commit_delay to '10000us'; +-#set 
spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'-#; +set spanner.max_commit_delay='1s'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to-#'10000us'; +set-#spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.max_commit_delay to '10000us'; +/set spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'/; +set spanner.max_commit_delay='1s'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to/'10000us'; +set/spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.max_commit_delay to '10000us'; +\set spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'\; +set spanner.max_commit_delay='1s'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to\'10000us'; +set\spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.max_commit_delay to '10000us'; +?set spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'?; +set spanner.max_commit_delay='1s'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to?'10000us'; +set?spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.max_commit_delay to '10000us'; +-/set spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'-/; +set spanner.max_commit_delay='1s'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to-/'10000us'; +set-/spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.max_commit_delay to '10000us'; +/#set 
spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'/#; +set spanner.max_commit_delay='1s'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to/#'10000us'; +set/#spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.max_commit_delay to '10000us'; +/-set spanner.max_commit_delay='1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to '10000us'/-; +set spanner.max_commit_delay='1s'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay to/-'10000us'; +set/-spanner.max_commit_delay='1s'; NEW_CONNECTION; -set spanner.max_commit_delay TO '9223372036854775807ns'; +set spanner.max_commit_delay = '1s'; NEW_CONNECTION; -SET SPANNER.MAX_COMMIT_DELAY TO '9223372036854775807NS'; +SET SPANNER.MAX_COMMIT_DELAY = '1S'; NEW_CONNECTION; -set spanner.max_commit_delay to '9223372036854775807ns'; +set spanner.max_commit_delay = '1s'; NEW_CONNECTION; - set spanner.max_commit_delay TO '9223372036854775807ns'; + set spanner.max_commit_delay = '1s'; NEW_CONNECTION; - set spanner.max_commit_delay TO '9223372036854775807ns'; + set spanner.max_commit_delay = '1s'; NEW_CONNECTION; -set spanner.max_commit_delay TO '9223372036854775807ns'; +set spanner.max_commit_delay = '1s'; NEW_CONNECTION; -set spanner.max_commit_delay TO '9223372036854775807ns' ; +set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; -set spanner.max_commit_delay TO '9223372036854775807ns' ; +set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; -set spanner.max_commit_delay TO '9223372036854775807ns' +set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; -set spanner.max_commit_delay TO '9223372036854775807ns'; +set spanner.max_commit_delay = '1s'; NEW_CONNECTION; -set spanner.max_commit_delay TO '9223372036854775807ns'; +set spanner.max_commit_delay = '1s'; NEW_CONNECTION; set spanner.max_commit_delay -TO 
-'9223372036854775807ns'; += +'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.max_commit_delay TO '9223372036854775807ns'; +foo set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns' bar; +set spanner.max_commit_delay = '1s' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.max_commit_delay TO '9223372036854775807ns'; +%set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'%; +set spanner.max_commit_delay = '1s'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO%'9223372036854775807ns'; +set spanner.max_commit_delay =%'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.max_commit_delay TO '9223372036854775807ns'; +_set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'_; +set spanner.max_commit_delay = '1s'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO_'9223372036854775807ns'; +set spanner.max_commit_delay =_'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.max_commit_delay TO '9223372036854775807ns'; +&set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'&; +set spanner.max_commit_delay = '1s'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO&'9223372036854775807ns'; +set spanner.max_commit_delay =&'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.max_commit_delay TO '9223372036854775807ns'; +$set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'$; +set spanner.max_commit_delay = '1s'$; NEW_CONNECTION; 
@EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO$'9223372036854775807ns'; +set spanner.max_commit_delay =$'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.max_commit_delay TO '9223372036854775807ns'; +@set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'@; +set spanner.max_commit_delay = '1s'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO@'9223372036854775807ns'; +set spanner.max_commit_delay =@'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.max_commit_delay TO '9223372036854775807ns'; +!set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'!; +set spanner.max_commit_delay = '1s'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO!'9223372036854775807ns'; +set spanner.max_commit_delay =!'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.max_commit_delay TO '9223372036854775807ns'; +*set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'*; +set spanner.max_commit_delay = '1s'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO*'9223372036854775807ns'; +set spanner.max_commit_delay =*'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.max_commit_delay TO '9223372036854775807ns'; +(set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'(; +set spanner.max_commit_delay = '1s'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO('9223372036854775807ns'; +set spanner.max_commit_delay =('1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.max_commit_delay TO 
'9223372036854775807ns'; +)set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'); +set spanner.max_commit_delay = '1s'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO)'9223372036854775807ns'; +set spanner.max_commit_delay =)'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.max_commit_delay TO '9223372036854775807ns'; +-set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'-; +set spanner.max_commit_delay = '1s'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO-'9223372036854775807ns'; +set spanner.max_commit_delay =-'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.max_commit_delay TO '9223372036854775807ns'; ++set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'+; +set spanner.max_commit_delay = '1s'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO+'9223372036854775807ns'; +set spanner.max_commit_delay =+'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.max_commit_delay TO '9223372036854775807ns'; +-#set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'-#; +set spanner.max_commit_delay = '1s'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO-#'9223372036854775807ns'; +set spanner.max_commit_delay =-#'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.max_commit_delay TO '9223372036854775807ns'; +/set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'/; +set spanner.max_commit_delay = '1s'/; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO/'9223372036854775807ns'; +set spanner.max_commit_delay =/'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.max_commit_delay TO '9223372036854775807ns'; +\set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'\; +set spanner.max_commit_delay = '1s'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO\'9223372036854775807ns'; +set spanner.max_commit_delay =\'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.max_commit_delay TO '9223372036854775807ns'; +?set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'?; +set spanner.max_commit_delay = '1s'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO?'9223372036854775807ns'; +set spanner.max_commit_delay =?'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.max_commit_delay TO '9223372036854775807ns'; +-/set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'-/; +set spanner.max_commit_delay = '1s'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO-/'9223372036854775807ns'; +set spanner.max_commit_delay =-/'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.max_commit_delay TO '9223372036854775807ns'; +/#set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'/#; +set spanner.max_commit_delay = '1s'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO/#'9223372036854775807ns'; +set spanner.max_commit_delay =/#'1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set 
spanner.max_commit_delay TO '9223372036854775807ns'; +/-set spanner.max_commit_delay = '1s'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO '9223372036854775807ns'/-; +set spanner.max_commit_delay = '1s'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.max_commit_delay TO/-'9223372036854775807ns'; +set spanner.max_commit_delay =/-'1s'; NEW_CONNECTION; -set spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; -SET SPANNER.STATEMENT_TAG='TAG1'; +SET SPANNER.MAX_COMMIT_DELAY = '1S' ; NEW_CONNECTION; -set spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; - set spanner.statement_tag='tag1'; + set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; - set spanner.statement_tag='tag1'; + set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; -set spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; -set spanner.statement_tag='tag1' ; +set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; -set spanner.statement_tag='tag1' ; +set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; -set spanner.statement_tag='tag1' +set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; -set spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; -set spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; set -spanner.statement_tag='tag1'; +spanner.max_commit_delay += +'1s' +; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.statement_tag='tag1'; +foo set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1' bar; +set spanner.max_commit_delay = '1s' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.statement_tag='tag1'; +%set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'%; +set spanner.max_commit_delay = '1s' %; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.statement_tag='tag1'; +_set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'_; +set spanner.max_commit_delay = '1s' _; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.statement_tag='tag1'; +&set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'&; +set spanner.max_commit_delay = '1s' &; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.statement_tag='tag1'; +$set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'$; +set spanner.max_commit_delay = '1s' $; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.statement_tag='tag1'; +@set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'@; +set spanner.max_commit_delay = '1s' @; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.statement_tag='tag1'; +!set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'!; +set spanner.max_commit_delay = '1s' !; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!spanner.statement_tag='tag1'; +set 
spanner.max_commit_delay = '1s'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.statement_tag='tag1'; +*set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'*; +set spanner.max_commit_delay = '1s' *; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.statement_tag='tag1'; +(set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'(; +set spanner.max_commit_delay = '1s' (; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.statement_tag='tag1'; +)set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'); +set spanner.max_commit_delay = '1s' ); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.statement_tag='tag1'; +-set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'-; +set spanner.max_commit_delay = '1s' -; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.statement_tag='tag1'; ++set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'+; +set spanner.max_commit_delay = '1s' +; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set 
spanner.statement_tag='tag1'; +-#set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'-#; +set spanner.max_commit_delay = '1s' -#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.statement_tag='tag1'; +/set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'/; +set spanner.max_commit_delay = '1s' /; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.statement_tag='tag1'; +\set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'\; +set spanner.max_commit_delay = '1s' \; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.statement_tag='tag1'; +?set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'?; +set spanner.max_commit_delay = '1s' ?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.statement_tag='tag1'; +-/set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'-/; +set spanner.max_commit_delay = '1s' -/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.statement_tag='tag1'; +/#set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; 
@EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'/#; +set spanner.max_commit_delay = '1s' /#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.statement_tag='tag1'; +/-set spanner.max_commit_delay = '1s' ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag1'/-; +set spanner.max_commit_delay = '1s' /-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-spanner.statement_tag='tag1'; +set spanner.max_commit_delay = '1s'/-; NEW_CONNECTION; -set spanner.statement_tag='tag2'; +set spanner.max_commit_delay=1000; NEW_CONNECTION; -SET SPANNER.STATEMENT_TAG='TAG2'; +SET SPANNER.MAX_COMMIT_DELAY=1000; NEW_CONNECTION; -set spanner.statement_tag='tag2'; +set spanner.max_commit_delay=1000; NEW_CONNECTION; - set spanner.statement_tag='tag2'; + set spanner.max_commit_delay=1000; NEW_CONNECTION; - set spanner.statement_tag='tag2'; + set spanner.max_commit_delay=1000; NEW_CONNECTION; -set spanner.statement_tag='tag2'; +set spanner.max_commit_delay=1000; NEW_CONNECTION; -set spanner.statement_tag='tag2' ; +set spanner.max_commit_delay=1000 ; NEW_CONNECTION; -set spanner.statement_tag='tag2' ; +set spanner.max_commit_delay=1000 ; NEW_CONNECTION; -set spanner.statement_tag='tag2' +set spanner.max_commit_delay=1000 ; NEW_CONNECTION; -set spanner.statement_tag='tag2'; +set spanner.max_commit_delay=1000; NEW_CONNECTION; -set spanner.statement_tag='tag2'; +set spanner.max_commit_delay=1000; NEW_CONNECTION; set -spanner.statement_tag='tag2'; +spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.statement_tag='tag2'; +foo set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2' bar; +set spanner.max_commit_delay=1000 bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set 
spanner.statement_tag='tag2'; +%set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'%; +set spanner.max_commit_delay=1000%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%spanner.statement_tag='tag2'; +set%spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.statement_tag='tag2'; +_set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'_; +set spanner.max_commit_delay=1000_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_spanner.statement_tag='tag2'; +set_spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.statement_tag='tag2'; +&set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'&; +set spanner.max_commit_delay=1000&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&spanner.statement_tag='tag2'; +set&spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.statement_tag='tag2'; +$set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'$; +set spanner.max_commit_delay=1000$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$spanner.statement_tag='tag2'; +set$spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.statement_tag='tag2'; +@set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'@; +set spanner.max_commit_delay=1000@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@spanner.statement_tag='tag2'; +set@spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.statement_tag='tag2'; +!set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.statement_tag='tag2'!; +set spanner.max_commit_delay=1000!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!spanner.statement_tag='tag2'; +set!spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.statement_tag='tag2'; +*set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'*; +set spanner.max_commit_delay=1000*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*spanner.statement_tag='tag2'; +set*spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.statement_tag='tag2'; +(set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'(; +set spanner.max_commit_delay=1000(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(spanner.statement_tag='tag2'; +set(spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.statement_tag='tag2'; +)set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'); +set spanner.max_commit_delay=1000); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)spanner.statement_tag='tag2'; +set)spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.statement_tag='tag2'; +-set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'-; +set spanner.max_commit_delay=1000-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-spanner.statement_tag='tag2'; +set-spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.statement_tag='tag2'; ++set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'+; +set spanner.max_commit_delay=1000+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set+spanner.statement_tag='tag2'; +set+spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.statement_tag='tag2'; +-#set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'-#; +set spanner.max_commit_delay=1000-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#spanner.statement_tag='tag2'; +set-#spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.statement_tag='tag2'; +/set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'/; +set spanner.max_commit_delay=1000/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/spanner.statement_tag='tag2'; +set/spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.statement_tag='tag2'; +\set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'\; +set spanner.max_commit_delay=1000\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\spanner.statement_tag='tag2'; +set\spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.statement_tag='tag2'; +?set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'?; +set spanner.max_commit_delay=1000?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?spanner.statement_tag='tag2'; +set?spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.statement_tag='tag2'; +-/set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'-/; +set spanner.max_commit_delay=1000-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/spanner.statement_tag='tag2'; +set-/spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set 
spanner.statement_tag='tag2'; +/#set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'/#; +set spanner.max_commit_delay=1000/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#spanner.statement_tag='tag2'; +set/#spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.statement_tag='tag2'; +/-set spanner.max_commit_delay=1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='tag2'/-; +set spanner.max_commit_delay=1000/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-spanner.statement_tag='tag2'; +set/-spanner.max_commit_delay=1000; NEW_CONNECTION; -set spanner.statement_tag=''; +set spanner.max_commit_delay = 1000; NEW_CONNECTION; -SET SPANNER.STATEMENT_TAG=''; +SET SPANNER.MAX_COMMIT_DELAY = 1000; NEW_CONNECTION; -set spanner.statement_tag=''; +set spanner.max_commit_delay = 1000; NEW_CONNECTION; - set spanner.statement_tag=''; + set spanner.max_commit_delay = 1000; NEW_CONNECTION; - set spanner.statement_tag=''; + set spanner.max_commit_delay = 1000; NEW_CONNECTION; -set spanner.statement_tag=''; +set spanner.max_commit_delay = 1000; NEW_CONNECTION; -set spanner.statement_tag='' ; +set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; -set spanner.statement_tag='' ; +set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; -set spanner.statement_tag='' +set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; -set spanner.statement_tag=''; +set spanner.max_commit_delay = 1000; NEW_CONNECTION; -set spanner.statement_tag=''; +set spanner.max_commit_delay = 1000; NEW_CONNECTION; set -spanner.statement_tag=''; +spanner.max_commit_delay += +1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.statement_tag=''; +foo set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag='' bar; +set spanner.max_commit_delay = 1000 bar; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -%set spanner.statement_tag=''; +%set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''%; +set spanner.max_commit_delay = 1000%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%spanner.statement_tag=''; +set spanner.max_commit_delay =%1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.statement_tag=''; +_set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''_; +set spanner.max_commit_delay = 1000_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_spanner.statement_tag=''; +set spanner.max_commit_delay =_1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.statement_tag=''; +&set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''&; +set spanner.max_commit_delay = 1000&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&spanner.statement_tag=''; +set spanner.max_commit_delay =&1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.statement_tag=''; +$set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''$; +set spanner.max_commit_delay = 1000$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$spanner.statement_tag=''; +set spanner.max_commit_delay =$1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.statement_tag=''; +@set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''@; +set spanner.max_commit_delay = 1000@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@spanner.statement_tag=''; +set spanner.max_commit_delay =@1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.statement_tag=''; +!set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.statement_tag=''!; +set spanner.max_commit_delay = 1000!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!spanner.statement_tag=''; +set spanner.max_commit_delay =!1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.statement_tag=''; +*set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''*; +set spanner.max_commit_delay = 1000*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*spanner.statement_tag=''; +set spanner.max_commit_delay =*1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.statement_tag=''; +(set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''(; +set spanner.max_commit_delay = 1000(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(spanner.statement_tag=''; +set spanner.max_commit_delay =(1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.statement_tag=''; +)set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''); +set spanner.max_commit_delay = 1000); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)spanner.statement_tag=''; +set spanner.max_commit_delay =)1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.statement_tag=''; +-set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''-; +set spanner.max_commit_delay = 1000-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-spanner.statement_tag=''; +set spanner.max_commit_delay =-1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.statement_tag=''; ++set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''+; +set spanner.max_commit_delay = 1000+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+spanner.statement_tag=''; +set spanner.max_commit_delay 
=+1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.statement_tag=''; +-#set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''-#; +set spanner.max_commit_delay = 1000-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#spanner.statement_tag=''; +set spanner.max_commit_delay =-#1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.statement_tag=''; +/set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''/; +set spanner.max_commit_delay = 1000/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/spanner.statement_tag=''; +set spanner.max_commit_delay =/1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.statement_tag=''; +\set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''\; +set spanner.max_commit_delay = 1000\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\spanner.statement_tag=''; +set spanner.max_commit_delay =\1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.statement_tag=''; +?set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''?; +set spanner.max_commit_delay = 1000?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?spanner.statement_tag=''; +set spanner.max_commit_delay =?1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.statement_tag=''; +-/set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''-/; +set spanner.max_commit_delay = 1000-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/spanner.statement_tag=''; +set spanner.max_commit_delay =-/1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.statement_tag=''; +/#set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set spanner.statement_tag=''/#; +set spanner.max_commit_delay = 1000/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#spanner.statement_tag=''; +set spanner.max_commit_delay =/#1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.statement_tag=''; +/-set spanner.max_commit_delay = 1000; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag=''/-; +set spanner.max_commit_delay = 1000/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-spanner.statement_tag=''; +set spanner.max_commit_delay =/-1000; NEW_CONNECTION; -set spanner.statement_tag to 'tag1'; +set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; -SET SPANNER.STATEMENT_TAG TO 'TAG1'; +SET SPANNER.MAX_COMMIT_DELAY = 1000 ; NEW_CONNECTION; -set spanner.statement_tag to 'tag1'; +set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; - set spanner.statement_tag to 'tag1'; + set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; - set spanner.statement_tag to 'tag1'; + set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; -set spanner.statement_tag to 'tag1'; +set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; -set spanner.statement_tag to 'tag1' ; +set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; -set spanner.statement_tag to 'tag1' ; +set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; -set spanner.statement_tag to 'tag1' +set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; -set spanner.statement_tag to 'tag1'; +set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; -set spanner.statement_tag to 'tag1'; +set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; set -spanner.statement_tag -to -'tag1'; +spanner.max_commit_delay += +1000 +; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.statement_tag to 'tag1'; +foo set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1' bar; +set spanner.max_commit_delay = 1000 bar; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -%set spanner.statement_tag to 'tag1'; +%set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'%; +set spanner.max_commit_delay = 1000 %; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to%'tag1'; +set spanner.max_commit_delay = 1000%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.statement_tag to 'tag1'; +_set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'_; +set spanner.max_commit_delay = 1000 _; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to_'tag1'; +set spanner.max_commit_delay = 1000_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.statement_tag to 'tag1'; +&set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'&; +set spanner.max_commit_delay = 1000 &; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to&'tag1'; +set spanner.max_commit_delay = 1000&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.statement_tag to 'tag1'; +$set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'$; +set spanner.max_commit_delay = 1000 $; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to$'tag1'; +set spanner.max_commit_delay = 1000$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.statement_tag to 'tag1'; +@set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'@; +set spanner.max_commit_delay = 1000 @; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to@'tag1'; +set spanner.max_commit_delay = 1000@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.statement_tag to 
'tag1'; +!set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'!; +set spanner.max_commit_delay = 1000 !; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to!'tag1'; +set spanner.max_commit_delay = 1000!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.statement_tag to 'tag1'; +*set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'*; +set spanner.max_commit_delay = 1000 *; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to*'tag1'; +set spanner.max_commit_delay = 1000*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.statement_tag to 'tag1'; +(set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'(; +set spanner.max_commit_delay = 1000 (; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to('tag1'; +set spanner.max_commit_delay = 1000(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.statement_tag to 'tag1'; +)set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'); +set spanner.max_commit_delay = 1000 ); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to)'tag1'; +set spanner.max_commit_delay = 1000); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.statement_tag to 'tag1'; +-set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'-; +set spanner.max_commit_delay = 1000 -; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to-'tag1'; +set spanner.max_commit_delay = 1000-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.statement_tag to 'tag1'; ++set spanner.max_commit_delay = 1000 ; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'+; +set spanner.max_commit_delay = 1000 +; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to+'tag1'; +set spanner.max_commit_delay = 1000+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.statement_tag to 'tag1'; +-#set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'-#; +set spanner.max_commit_delay = 1000 -#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to-#'tag1'; +set spanner.max_commit_delay = 1000-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.statement_tag to 'tag1'; +/set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'/; +set spanner.max_commit_delay = 1000 /; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to/'tag1'; +set spanner.max_commit_delay = 1000/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.statement_tag to 'tag1'; +\set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'\; +set spanner.max_commit_delay = 1000 \; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to\'tag1'; +set spanner.max_commit_delay = 1000\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.statement_tag to 'tag1'; +?set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'?; +set spanner.max_commit_delay = 1000 ?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to?'tag1'; +set spanner.max_commit_delay = 1000?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.statement_tag to 'tag1'; +-/set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set spanner.statement_tag to 'tag1'-/; +set spanner.max_commit_delay = 1000 -/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to-/'tag1'; +set spanner.max_commit_delay = 1000-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.statement_tag to 'tag1'; +/#set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'/#; +set spanner.max_commit_delay = 1000 /#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to/#'tag1'; +set spanner.max_commit_delay = 1000/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.statement_tag to 'tag1'; +/-set spanner.max_commit_delay = 1000 ; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag1'/-; +set spanner.max_commit_delay = 1000 /-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to/-'tag1'; +set spanner.max_commit_delay = 1000/-; NEW_CONNECTION; -set spanner.statement_tag to 'tag2'; +set spanner.max_commit_delay='100ms'; NEW_CONNECTION; -SET SPANNER.STATEMENT_TAG TO 'TAG2'; +SET SPANNER.MAX_COMMIT_DELAY='100MS'; NEW_CONNECTION; -set spanner.statement_tag to 'tag2'; +set spanner.max_commit_delay='100ms'; NEW_CONNECTION; - set spanner.statement_tag to 'tag2'; + set spanner.max_commit_delay='100ms'; NEW_CONNECTION; - set spanner.statement_tag to 'tag2'; + set spanner.max_commit_delay='100ms'; NEW_CONNECTION; -set spanner.statement_tag to 'tag2'; +set spanner.max_commit_delay='100ms'; NEW_CONNECTION; -set spanner.statement_tag to 'tag2' ; +set spanner.max_commit_delay='100ms' ; NEW_CONNECTION; -set spanner.statement_tag to 'tag2' ; +set spanner.max_commit_delay='100ms' ; NEW_CONNECTION; -set spanner.statement_tag to 'tag2' +set spanner.max_commit_delay='100ms' ; NEW_CONNECTION; -set spanner.statement_tag to 'tag2'; +set spanner.max_commit_delay='100ms'; NEW_CONNECTION; -set spanner.statement_tag to 'tag2'; +set 
spanner.max_commit_delay='100ms'; NEW_CONNECTION; set -spanner.statement_tag -to -'tag2'; +spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.statement_tag to 'tag2'; +foo set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2' bar; +set spanner.max_commit_delay='100ms' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.statement_tag to 'tag2'; +%set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'%; +set spanner.max_commit_delay='100ms'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to%'tag2'; +set%spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.statement_tag to 'tag2'; +_set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'_; +set spanner.max_commit_delay='100ms'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to_'tag2'; +set_spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.statement_tag to 'tag2'; +&set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'&; +set spanner.max_commit_delay='100ms'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to&'tag2'; +set&spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.statement_tag to 'tag2'; +$set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'$; +set spanner.max_commit_delay='100ms'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to$'tag2'; +set$spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-@set spanner.statement_tag to 'tag2'; +@set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'@; +set spanner.max_commit_delay='100ms'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to@'tag2'; +set@spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.statement_tag to 'tag2'; +!set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'!; +set spanner.max_commit_delay='100ms'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to!'tag2'; +set!spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.statement_tag to 'tag2'; +*set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'*; +set spanner.max_commit_delay='100ms'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to*'tag2'; +set*spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.statement_tag to 'tag2'; +(set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'(; +set spanner.max_commit_delay='100ms'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to('tag2'; +set(spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.statement_tag to 'tag2'; +)set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'); +set spanner.max_commit_delay='100ms'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to)'tag2'; +set)spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.statement_tag to 'tag2'; +-set 
spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'-; +set spanner.max_commit_delay='100ms'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to-'tag2'; +set-spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.statement_tag to 'tag2'; ++set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'+; +set spanner.max_commit_delay='100ms'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to+'tag2'; +set+spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.statement_tag to 'tag2'; +-#set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'-#; +set spanner.max_commit_delay='100ms'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to-#'tag2'; +set-#spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.statement_tag to 'tag2'; +/set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'/; +set spanner.max_commit_delay='100ms'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to/'tag2'; +set/spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.statement_tag to 'tag2'; +\set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'\; +set spanner.max_commit_delay='100ms'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to\'tag2'; +set\spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.statement_tag to 'tag2'; +?set spanner.max_commit_delay='100ms'; NEW_CONNECTION; 
@EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'?; +set spanner.max_commit_delay='100ms'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to?'tag2'; +set?spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.statement_tag to 'tag2'; +-/set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'-/; +set spanner.max_commit_delay='100ms'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to-/'tag2'; +set-/spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.statement_tag to 'tag2'; +/#set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'/#; +set spanner.max_commit_delay='100ms'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to/#'tag2'; +set/#spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.statement_tag to 'tag2'; +/-set spanner.max_commit_delay='100ms'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'tag2'/-; +set spanner.max_commit_delay='100ms'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to/-'tag2'; +set/-spanner.max_commit_delay='100ms'; NEW_CONNECTION; -set spanner.statement_tag to ''; +set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; -SET SPANNER.STATEMENT_TAG TO ''; +SET SPANNER.MAX_COMMIT_DELAY TO '10000US'; NEW_CONNECTION; -set spanner.statement_tag to ''; +set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; - set spanner.statement_tag to ''; + set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; - set spanner.statement_tag to ''; + set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; -set spanner.statement_tag to ''; +set spanner.max_commit_delay to '10000us'; 
NEW_CONNECTION; -set spanner.statement_tag to '' ; +set spanner.max_commit_delay to '10000us' ; NEW_CONNECTION; -set spanner.statement_tag to '' ; +set spanner.max_commit_delay to '10000us' ; NEW_CONNECTION; -set spanner.statement_tag to '' +set spanner.max_commit_delay to '10000us' ; NEW_CONNECTION; -set spanner.statement_tag to ''; +set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; -set spanner.statement_tag to ''; +set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; set -spanner.statement_tag +spanner.max_commit_delay to -''; +'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.statement_tag to ''; +foo set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to '' bar; +set spanner.max_commit_delay to '10000us' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.statement_tag to ''; +%set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''%; +set spanner.max_commit_delay to '10000us'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to%''; +set spanner.max_commit_delay to%'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.statement_tag to ''; +_set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''_; +set spanner.max_commit_delay to '10000us'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to_''; +set spanner.max_commit_delay to_'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.statement_tag to ''; +&set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''&; +set spanner.max_commit_delay to '10000us'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to&''; +set spanner.max_commit_delay 
to&'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.statement_tag to ''; +$set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''$; +set spanner.max_commit_delay to '10000us'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to$''; +set spanner.max_commit_delay to$'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.statement_tag to ''; +@set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''@; +set spanner.max_commit_delay to '10000us'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to@''; +set spanner.max_commit_delay to@'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.statement_tag to ''; +!set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''!; +set spanner.max_commit_delay to '10000us'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to!''; +set spanner.max_commit_delay to!'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.statement_tag to ''; +*set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''*; +set spanner.max_commit_delay to '10000us'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to*''; +set spanner.max_commit_delay to*'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.statement_tag to ''; +(set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''(; +set spanner.max_commit_delay to '10000us'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to(''; +set spanner.max_commit_delay to('10000us'; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -)set spanner.statement_tag to ''; +)set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''); +set spanner.max_commit_delay to '10000us'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to)''; +set spanner.max_commit_delay to)'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.statement_tag to ''; +-set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''-; +set spanner.max_commit_delay to '10000us'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to-''; +set spanner.max_commit_delay to-'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.statement_tag to ''; ++set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''+; +set spanner.max_commit_delay to '10000us'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to+''; +set spanner.max_commit_delay to+'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.statement_tag to ''; +-#set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''-#; +set spanner.max_commit_delay to '10000us'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to-#''; +set spanner.max_commit_delay to-#'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.statement_tag to ''; +/set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''/; +set spanner.max_commit_delay to '10000us'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to/''; +set spanner.max_commit_delay to/'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set 
spanner.statement_tag to ''; +\set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''\; +set spanner.max_commit_delay to '10000us'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to\''; +set spanner.max_commit_delay to\'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.statement_tag to ''; +?set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''?; +set spanner.max_commit_delay to '10000us'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to?''; +set spanner.max_commit_delay to?'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.statement_tag to ''; +-/set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''-/; +set spanner.max_commit_delay to '10000us'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to-/''; +set spanner.max_commit_delay to-/'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.statement_tag to ''; +/#set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''/#; +set spanner.max_commit_delay to '10000us'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to/#''; +set spanner.max_commit_delay to/#'10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.statement_tag to ''; +/-set spanner.max_commit_delay to '10000us'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to ''/-; +set spanner.max_commit_delay to '10000us'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to/-''; +set spanner.max_commit_delay to/-'10000us'; NEW_CONNECTION; -set spanner.statement_tag to 'test_tag'; +set 
spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; -SET SPANNER.STATEMENT_TAG TO 'TEST_TAG'; +SET SPANNER.MAX_COMMIT_DELAY TO '9223372036854775807NS'; NEW_CONNECTION; -set spanner.statement_tag to 'test_tag'; +set spanner.max_commit_delay to '9223372036854775807ns'; NEW_CONNECTION; - set spanner.statement_tag to 'test_tag'; + set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; - set spanner.statement_tag to 'test_tag'; + set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; -set spanner.statement_tag to 'test_tag'; +set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; -set spanner.statement_tag to 'test_tag' ; +set spanner.max_commit_delay TO '9223372036854775807ns' ; NEW_CONNECTION; -set spanner.statement_tag to 'test_tag' ; +set spanner.max_commit_delay TO '9223372036854775807ns' ; NEW_CONNECTION; -set spanner.statement_tag to 'test_tag' +set spanner.max_commit_delay TO '9223372036854775807ns' ; NEW_CONNECTION; -set spanner.statement_tag to 'test_tag'; +set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; -set spanner.statement_tag to 'test_tag'; +set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; set -spanner.statement_tag -to -'test_tag'; +spanner.max_commit_delay +TO +'9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.statement_tag to 'test_tag'; +foo set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag' bar; +set spanner.max_commit_delay TO '9223372036854775807ns' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.statement_tag to 'test_tag'; +%set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'%; +set spanner.max_commit_delay TO '9223372036854775807ns'%; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set spanner.statement_tag to%'test_tag'; +set spanner.max_commit_delay TO%'9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.statement_tag to 'test_tag'; +_set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'_; +set spanner.max_commit_delay TO '9223372036854775807ns'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to_'test_tag'; +set spanner.max_commit_delay TO_'9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.statement_tag to 'test_tag'; +&set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'&; +set spanner.max_commit_delay TO '9223372036854775807ns'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to&'test_tag'; +set spanner.max_commit_delay TO&'9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.statement_tag to 'test_tag'; +$set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'$; +set spanner.max_commit_delay TO '9223372036854775807ns'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to$'test_tag'; +set spanner.max_commit_delay TO$'9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.statement_tag to 'test_tag'; +@set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'@; +set spanner.max_commit_delay TO '9223372036854775807ns'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to@'test_tag'; +set spanner.max_commit_delay TO@'9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-!set spanner.statement_tag to 'test_tag'; +!set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'!; +set spanner.max_commit_delay TO '9223372036854775807ns'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to!'test_tag'; +set spanner.max_commit_delay TO!'9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.statement_tag to 'test_tag'; +*set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'*; +set spanner.max_commit_delay TO '9223372036854775807ns'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to*'test_tag'; +set spanner.max_commit_delay TO*'9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.statement_tag to 'test_tag'; +(set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'(; +set spanner.max_commit_delay TO '9223372036854775807ns'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to('test_tag'; +set spanner.max_commit_delay TO('9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.statement_tag to 'test_tag'; +)set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'); +set spanner.max_commit_delay TO '9223372036854775807ns'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to)'test_tag'; +set spanner.max_commit_delay TO)'9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.statement_tag to 'test_tag'; +-set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.statement_tag to 'test_tag'-; +set spanner.max_commit_delay TO '9223372036854775807ns'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to-'test_tag'; +set spanner.max_commit_delay TO-'9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.statement_tag to 'test_tag'; ++set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'+; +set spanner.max_commit_delay TO '9223372036854775807ns'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to+'test_tag'; +set spanner.max_commit_delay TO+'9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.statement_tag to 'test_tag'; +-#set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'-#; +set spanner.max_commit_delay TO '9223372036854775807ns'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to-#'test_tag'; +set spanner.max_commit_delay TO-#'9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.statement_tag to 'test_tag'; +/set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'/; +set spanner.max_commit_delay TO '9223372036854775807ns'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to/'test_tag'; +set spanner.max_commit_delay TO/'9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.statement_tag to 'test_tag'; +\set spanner.max_commit_delay TO '9223372036854775807ns'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'\; +set spanner.max_commit_delay TO '9223372036854775807ns'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.statement_tag to\'test_tag'; +set spanner.max_commit_delay TO\'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO?'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO-/'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO/#'9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.max_commit_delay TO '9223372036854775807ns'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO '9223372036854775807ns'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.max_commit_delay TO/-'9223372036854775807ns'; +NEW_CONNECTION; +set spanner.statement_tag='tag1'; +NEW_CONNECTION; +SET SPANNER.STATEMENT_TAG='TAG1'; +NEW_CONNECTION; +set spanner.statement_tag='tag1'; +NEW_CONNECTION; + set spanner.statement_tag='tag1'; +NEW_CONNECTION; + set spanner.statement_tag='tag1'; +NEW_CONNECTION; + + + +set spanner.statement_tag='tag1'; +NEW_CONNECTION; +set spanner.statement_tag='tag1' ; +NEW_CONNECTION; +set spanner.statement_tag='tag1' ; +NEW_CONNECTION; +set spanner.statement_tag='tag1' + +; +NEW_CONNECTION; +set spanner.statement_tag='tag1'; +NEW_CONNECTION; +set 
spanner.statement_tag='tag1'; +NEW_CONNECTION; +set +spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.statement_tag='tag1'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set\spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.statement_tag='tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag1'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.statement_tag='tag1'; +NEW_CONNECTION; +set spanner.statement_tag='tag2'; +NEW_CONNECTION; +SET SPANNER.STATEMENT_TAG='TAG2'; +NEW_CONNECTION; +set spanner.statement_tag='tag2'; +NEW_CONNECTION; + set spanner.statement_tag='tag2'; +NEW_CONNECTION; + set spanner.statement_tag='tag2'; +NEW_CONNECTION; + + + +set spanner.statement_tag='tag2'; +NEW_CONNECTION; +set spanner.statement_tag='tag2' ; +NEW_CONNECTION; +set spanner.statement_tag='tag2' ; +NEW_CONNECTION; +set spanner.statement_tag='tag2' + +; +NEW_CONNECTION; +set spanner.statement_tag='tag2'; +NEW_CONNECTION; +set spanner.statement_tag='tag2'; +NEW_CONNECTION; +set +spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set 
spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'(; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set 
spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.statement_tag='tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='tag2'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.statement_tag='tag2'; +NEW_CONNECTION; +set spanner.statement_tag=''; +NEW_CONNECTION; +SET SPANNER.STATEMENT_TAG=''; +NEW_CONNECTION; +set spanner.statement_tag=''; +NEW_CONNECTION; + set spanner.statement_tag=''; +NEW_CONNECTION; + set spanner.statement_tag=''; +NEW_CONNECTION; + + + +set spanner.statement_tag=''; +NEW_CONNECTION; +set spanner.statement_tag='' ; +NEW_CONNECTION; +set spanner.statement_tag='' ; +NEW_CONNECTION; +set spanner.statement_tag='' + +; +NEW_CONNECTION; +set spanner.statement_tag=''; +NEW_CONNECTION; +set spanner.statement_tag=''; +NEW_CONNECTION; +set +spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag='' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set_spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''-; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag=''/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.statement_tag=''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.statement_tag=''/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.statement_tag=''; +NEW_CONNECTION; +set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +SET SPANNER.STATEMENT_TAG TO 'TAG1'; +NEW_CONNECTION; +set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; + set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; + set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; + + + +set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +set spanner.statement_tag to 'tag1' ; +NEW_CONNECTION; +set spanner.statement_tag to 'tag1' ; +NEW_CONNECTION; +set spanner.statement_tag to 'tag1' + +; +NEW_CONNECTION; +set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +set +spanner.statement_tag +to +'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to%'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to_'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to&'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'$; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to$'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to@'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to!'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to*'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to('tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to)'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.statement_tag to+'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-#'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to\'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to?'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-/'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/#'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.statement_tag to 'tag1'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag1'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/-'tag1'; +NEW_CONNECTION; +set 
spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +SET SPANNER.STATEMENT_TAG TO 'TAG2'; +NEW_CONNECTION; +set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; + set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; + set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; + + + +set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +set spanner.statement_tag to 'tag2' ; +NEW_CONNECTION; +set spanner.statement_tag to 'tag2' ; +NEW_CONNECTION; +set spanner.statement_tag to 'tag2' + +; +NEW_CONNECTION; +set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +set +spanner.statement_tag +to +'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to%'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to_'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to&'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to$'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+@set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to@'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to!'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to*'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to('tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to)'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to+'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-#'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to\'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to?'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-/'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/#'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.statement_tag to 'tag2'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'tag2'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/-'tag2'; +NEW_CONNECTION; +set spanner.statement_tag to ''; +NEW_CONNECTION; +SET SPANNER.STATEMENT_TAG TO ''; +NEW_CONNECTION; +set spanner.statement_tag to ''; +NEW_CONNECTION; + set 
spanner.statement_tag to ''; +NEW_CONNECTION; + set spanner.statement_tag to ''; +NEW_CONNECTION; + + + +set spanner.statement_tag to ''; +NEW_CONNECTION; +set spanner.statement_tag to '' ; +NEW_CONNECTION; +set spanner.statement_tag to '' ; +NEW_CONNECTION; +set spanner.statement_tag to '' + +; +NEW_CONNECTION; +set spanner.statement_tag to ''; +NEW_CONNECTION; +set spanner.statement_tag to ''; +NEW_CONNECTION; +set +spanner.statement_tag +to +''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to '' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to%''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to_''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to&''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to$''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to@''; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +!set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to!''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to*''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to(''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to)''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to+''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-#''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''/; 
+NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to\''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to?''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-/''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/#''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.statement_tag to ''; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to ''/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/-''; +NEW_CONNECTION; +set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +SET SPANNER.STATEMENT_TAG TO 'TEST_TAG'; +NEW_CONNECTION; +set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; + set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; + set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; + + + +set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +set spanner.statement_tag to 'test_tag' ; +NEW_CONNECTION; +set spanner.statement_tag to 'test_tag' ; +NEW_CONNECTION; +set spanner.statement_tag to 'test_tag' + +; +NEW_CONNECTION; +set spanner.statement_tag 
to 'test_tag'; +NEW_CONNECTION; +set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +set +spanner.statement_tag +to +'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag' bar; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'%; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to%'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'_; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to_'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'&; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to&'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to$'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to@'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'!; +NEW_CONNECTION; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.statement_tag to!'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to*'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to('test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to)'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to+'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-#'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'/; +NEW_CONNECTION; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to\'test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT ?set spanner.statement_tag to 'test_tag'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'?; +set spanner.statement_tag to 'test_tag'?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to?'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to-/'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/#'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.statement_tag to 'test_tag'; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to 'test_tag'/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.statement_tag to/-'test_tag'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +SET SPANNER.TRANSACTION_TAG='TAG1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; 
+ + + +set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag1' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag1' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag1' + +; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +set +spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set spanner.transaction_tag='tag1'$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set 
spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set?spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag1'/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.transaction_tag='tag1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +SET SPANNER.TRANSACTION_TAG='TAG2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; + + + +set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag2' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag2' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag2' + +; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set 
autocommit = false; +set +spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT 
+set@spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_tag='tag2'+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set 
spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='tag2'/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.transaction_tag='tag2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +SET SPANNER.TRANSACTION_TAG=''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; + + + +set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag='' + +; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +set +spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag='' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''%; 
+NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set%spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set_spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set&spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set$spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set@spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set!spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_tag=''*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set*spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set(spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set)spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set+spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-#spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.transaction_tag=''/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set\spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set?spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set-/spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/#spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag=''/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set/-spanner.transaction_tag=''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +SET SPANNER.TRANSACTION_TAG 
TO 'TAG1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; + + + +set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag1' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag1' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag1' + +; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +set +spanner.transaction_tag +to +'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to%'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to_'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to&'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to$'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to@'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to!'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to*'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to('tag1'; +NEW_CONNECTION; +set autocommit = false; 
+@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to)'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to+'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-#'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'\; +NEW_CONNECTION; 
+set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to\'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to?'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-/'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/#'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_tag to 'tag1'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag1'/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/-'tag1'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +SET SPANNER.TRANSACTION_TAG TO 'TAG2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; + + + +set 
spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag2' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag2' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag2' + +; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +set +spanner.transaction_tag +to +'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to%'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to_'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to&'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit 
= false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to$'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to@'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to!'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to*'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to('tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to)'tag2'; 
+NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to+'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-#'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to\'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_tag to 'tag2'?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to?'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-/'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/#'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_tag to 'tag2'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'tag2'/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/-'tag2'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +SET SPANNER.TRANSACTION_TAG TO ''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; + + + +set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to '' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to '' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to '' + +; +NEW_CONNECTION; +set autocommit = false; +set 
spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +set +spanner.transaction_tag +to +''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to '' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +%set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to%''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to_''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to&''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to$''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_tag to ''@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to@''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to!''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to*''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to(''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to)''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_tag to ''; +NEW_CONNECTION; 
+set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to+''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-#''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to\''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to?''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-/''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +/#set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/#''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set spanner.transaction_tag to ''; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to ''/-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/-''; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +SET SPANNER.TRANSACTION_TAG TO 'TEST_TAG'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; + set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; + + + +set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'test_tag' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'test_tag' ; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'test_tag' + +; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +set +spanner.transaction_tag +to +'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +foo set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag' bar; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION 
INVALID_ARGUMENT +%set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'%; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to%'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +_set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'_; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to_'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'&; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to&'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'$; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to$'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'@; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to@'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.transaction_tag to 'test_tag'!; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to!'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'*; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to*'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'(; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to('test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'); +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to)'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'-; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'+; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag 
to+'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-#set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'-#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-#'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'\; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to\'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'?; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to?'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.transaction_tag to 'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'-/; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to-/'test_tag'; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.transaction_tag to 'test_tag'; 
+NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to 'test_tag'/#; +NEW_CONNECTION; +set autocommit = false; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.transaction_tag to/#'test_tag'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to?'test_tag'; +/-set spanner.transaction_tag to 'test_tag'; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.statement_tag to 'test_tag'; +set spanner.transaction_tag to 'test_tag'/-; NEW_CONNECTION; +set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'-/; +set spanner.transaction_tag to/-'test_tag'; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +SET SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS = TRUE; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; + set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; + set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; + + + +set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = true ; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = true ; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = true + +; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +set +spanner.exclude_txn_from_change_streams += +true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to-/'test_tag'; +foo set spanner.exclude_txn_from_change_streams = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.statement_tag to 'test_tag'; +set spanner.exclude_txn_from_change_streams = true bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'/#; 
+%set spanner.exclude_txn_from_change_streams = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to/#'test_tag'; +set spanner.exclude_txn_from_change_streams = true%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.statement_tag to 'test_tag'; +set spanner.exclude_txn_from_change_streams =%true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to 'test_tag'/-; +_set spanner.exclude_txn_from_change_streams = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.statement_tag to/-'test_tag'; +set spanner.exclude_txn_from_change_streams = true_; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag1'; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =_true; NEW_CONNECTION; -set autocommit = false; -SET SPANNER.TRANSACTION_TAG='TAG1'; +@EXPECT EXCEPTION INVALID_ARGUMENT +&set spanner.exclude_txn_from_change_streams = true; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag1'; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true&; NEW_CONNECTION; -set autocommit = false; - set spanner.transaction_tag='tag1'; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =&true; NEW_CONNECTION; -set autocommit = false; - set spanner.transaction_tag='tag1'; +@EXPECT EXCEPTION INVALID_ARGUMENT +$set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true$; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =$true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +@set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true@; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set 
spanner.exclude_txn_from_change_streams =@true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +!set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true!; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =!true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +*set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true*; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =*true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +(set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true(; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =(true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +)set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true); +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =)true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =-true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT ++set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true+; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =+true; +NEW_CONNECTION; +@EXPECT 
EXCEPTION INVALID_ARGUMENT +-#set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true-#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =-#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +\set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true\; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =\true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +?set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true?; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =?true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +-/set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true-/; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =-/true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/#set spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true/#; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =/#true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +/-set 
spanner.exclude_txn_from_change_streams = true; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams = true/-; +NEW_CONNECTION; +@EXPECT EXCEPTION INVALID_ARGUMENT +set spanner.exclude_txn_from_change_streams =/-true; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; +SET SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS = FALSE; +NEW_CONNECTION; +set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; + set spanner.exclude_txn_from_change_streams = false; +NEW_CONNECTION; + set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag1' ; +set spanner.exclude_txn_from_change_streams = false ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag1' ; +set spanner.exclude_txn_from_change_streams = false ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag1' +set spanner.exclude_txn_from_change_streams = false ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; set -spanner.transaction_tag='tag1'; +spanner.exclude_txn_from_change_streams += +false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.transaction_tag='tag1'; +foo set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1' bar; +set spanner.exclude_txn_from_change_streams = false bar; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set 
spanner.transaction_tag='tag1'; +%set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'%; +set spanner.exclude_txn_from_change_streams = false%; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set%spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =%false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.transaction_tag='tag1'; +_set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'_; +set spanner.exclude_txn_from_change_streams = false_; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set_spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =_false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.transaction_tag='tag1'; +&set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'&; +set spanner.exclude_txn_from_change_streams = false&; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set&spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =&false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.transaction_tag='tag1'; +$set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'$; +set spanner.exclude_txn_from_change_streams = false$; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set$spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =$false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION 
INVALID_ARGUMENT -@set spanner.transaction_tag='tag1'; +@set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'@; +set spanner.exclude_txn_from_change_streams = false@; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set@spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =@false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.transaction_tag='tag1'; +!set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'!; +set spanner.exclude_txn_from_change_streams = false!; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set!spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =!false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.transaction_tag='tag1'; +*set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'*; +set spanner.exclude_txn_from_change_streams = false*; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set*spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =*false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.transaction_tag='tag1'; +(set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'(; +set spanner.exclude_txn_from_change_streams = false(; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set(spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =(false; NEW_CONNECTION; -set autocommit = 
false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.transaction_tag='tag1'; +)set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'); +set spanner.exclude_txn_from_change_streams = false); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set)spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =)false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.transaction_tag='tag1'; +-set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'-; +set spanner.exclude_txn_from_change_streams = false-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =-false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.transaction_tag='tag1'; ++set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'+; +set spanner.exclude_txn_from_change_streams = false+; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set+spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =+false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.transaction_tag='tag1'; +-#set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'-#; +set spanner.exclude_txn_from_change_streams = false-#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =-#false; 
NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.transaction_tag='tag1'; +/set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'/; +set spanner.exclude_txn_from_change_streams = false/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =/false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.transaction_tag='tag1'; +\set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'\; +set spanner.exclude_txn_from_change_streams = false\; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set\spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =\false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.transaction_tag='tag1'; +?set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'?; +set spanner.exclude_txn_from_change_streams = false?; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set?spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =?false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.transaction_tag='tag1'; +-/set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'-/; +set spanner.exclude_txn_from_change_streams = false-/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/spanner.transaction_tag='tag1'; +set 
spanner.exclude_txn_from_change_streams =-/false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.transaction_tag='tag1'; +/#set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'/#; +set spanner.exclude_txn_from_change_streams = false/#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =/#false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.transaction_tag='tag1'; +/-set spanner.exclude_txn_from_change_streams = false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag1'/-; +set spanner.exclude_txn_from_change_streams = false/-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-spanner.transaction_tag='tag1'; +set spanner.exclude_txn_from_change_streams =/-false; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; -SET SPANNER.TRANSACTION_TAG='TAG2'; +SET SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS TO TRUE; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; - set spanner.transaction_tag='tag2'; + set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; - set spanner.transaction_tag='tag2'; + set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag2' ; +set spanner.exclude_txn_from_change_streams to true 
; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag2' ; +set spanner.exclude_txn_from_change_streams to true ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag2' +set spanner.exclude_txn_from_change_streams to true ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; set -spanner.transaction_tag='tag2'; +spanner.exclude_txn_from_change_streams +to +true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.transaction_tag='tag2'; +foo set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2' bar; +set spanner.exclude_txn_from_change_streams to true bar; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.transaction_tag='tag2'; +%set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'%; +set spanner.exclude_txn_from_change_streams to true%; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set%spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to%true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.transaction_tag='tag2'; +_set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'_; +set spanner.exclude_txn_from_change_streams to true_; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set_spanner.transaction_tag='tag2'; +set 
spanner.exclude_txn_from_change_streams to_true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.transaction_tag='tag2'; +&set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'&; +set spanner.exclude_txn_from_change_streams to true&; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set&spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to&true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.transaction_tag='tag2'; +$set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'$; +set spanner.exclude_txn_from_change_streams to true$; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set$spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to$true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.transaction_tag='tag2'; +@set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'@; +set spanner.exclude_txn_from_change_streams to true@; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set@spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to@true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.transaction_tag='tag2'; +!set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'!; +set spanner.exclude_txn_from_change_streams to true!; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set!spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to!true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.transaction_tag='tag2'; +*set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'*; +set spanner.exclude_txn_from_change_streams to true*; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set*spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to*true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.transaction_tag='tag2'; +(set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'(; +set spanner.exclude_txn_from_change_streams to true(; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set(spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to(true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.transaction_tag='tag2'; +)set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'); +set spanner.exclude_txn_from_change_streams to true); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set)spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to)true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.transaction_tag='tag2'; +-set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'-; +set spanner.exclude_txn_from_change_streams to true-; NEW_CONNECTION; -set autocommit = false; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set-spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to-true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.transaction_tag='tag2'; ++set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'+; +set spanner.exclude_txn_from_change_streams to true+; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set+spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to+true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.transaction_tag='tag2'; +-#set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'-#; +set spanner.exclude_txn_from_change_streams to true-#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to-#true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.transaction_tag='tag2'; +/set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'/; +set spanner.exclude_txn_from_change_streams to true/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to/true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.transaction_tag='tag2'; +\set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'\; +set spanner.exclude_txn_from_change_streams to true\; NEW_CONNECTION; -set 
autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set\spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to\true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.transaction_tag='tag2'; +?set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'?; +set spanner.exclude_txn_from_change_streams to true?; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set?spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to?true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.transaction_tag='tag2'; +-/set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'-/; +set spanner.exclude_txn_from_change_streams to true-/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to-/true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.transaction_tag='tag2'; +/#set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'/#; +set spanner.exclude_txn_from_change_streams to true/#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to/#true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.transaction_tag='tag2'; +/-set spanner.exclude_txn_from_change_streams to true; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='tag2'/-; +set 
spanner.exclude_txn_from_change_streams to true/-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-spanner.transaction_tag='tag2'; +set spanner.exclude_txn_from_change_streams to/-true; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; -SET SPANNER.TRANSACTION_TAG=''; +SET SPANNER.EXCLUDE_TXN_FROM_CHANGE_STREAMS TO FALSE; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; - set spanner.transaction_tag=''; + set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; - set spanner.transaction_tag=''; + set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='' ; +set spanner.exclude_txn_from_change_streams to false ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='' ; +set spanner.exclude_txn_from_change_streams to false ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag='' +set spanner.exclude_txn_from_change_streams to false ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; set -spanner.transaction_tag=''; +spanner.exclude_txn_from_change_streams +to +false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.transaction_tag=''; +foo set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag='' bar; +set spanner.exclude_txn_from_change_streams to false bar; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.transaction_tag=''; +%set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''%; +set spanner.exclude_txn_from_change_streams to false%; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set%spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to%false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.transaction_tag=''; +_set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''_; +set spanner.exclude_txn_from_change_streams to false_; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set_spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to_false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.transaction_tag=''; +&set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''&; +set spanner.exclude_txn_from_change_streams to false&; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set&spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to&false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.transaction_tag=''; +$set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''$; +set spanner.exclude_txn_from_change_streams to false$; NEW_CONNECTION; -set autocommit = false; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set$spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to$false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.transaction_tag=''; +@set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''@; +set spanner.exclude_txn_from_change_streams to false@; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set@spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to@false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.transaction_tag=''; +!set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''!; +set spanner.exclude_txn_from_change_streams to false!; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set!spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to!false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.transaction_tag=''; +*set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''*; +set spanner.exclude_txn_from_change_streams to false*; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set*spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to*false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.transaction_tag=''; +(set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''(; +set spanner.exclude_txn_from_change_streams to false(; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set(spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to(false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.transaction_tag=''; +)set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''); +set spanner.exclude_txn_from_change_streams to false); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set)spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to)false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.transaction_tag=''; +-set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''-; +set spanner.exclude_txn_from_change_streams to false-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set-spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to-false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.transaction_tag=''; ++set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''+; +set spanner.exclude_txn_from_change_streams to false+; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set+spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to+false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.transaction_tag=''; +-#set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''-#; +set spanner.exclude_txn_from_change_streams to false-#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set-#spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to-#false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.transaction_tag=''; +/set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''/; +set spanner.exclude_txn_from_change_streams to false/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to/false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.transaction_tag=''; +\set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''\; +set spanner.exclude_txn_from_change_streams to false\; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set\spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to\false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.transaction_tag=''; +?set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''?; +set spanner.exclude_txn_from_change_streams to false?; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set?spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to?false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.transaction_tag=''; +-/set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''-/; +set spanner.exclude_txn_from_change_streams to false-/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set-/spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to-/false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.transaction_tag=''; +/#set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''/#; +set spanner.exclude_txn_from_change_streams to false/#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to/#false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.transaction_tag=''; +/-set spanner.exclude_txn_from_change_streams to false; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag=''/-; +set spanner.exclude_txn_from_change_streams to false/-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-spanner.transaction_tag=''; +set spanner.exclude_txn_from_change_streams to/-false; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'tag1'; +set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; -SET SPANNER.TRANSACTION_TAG TO 'TAG1'; +SET SPANNER.RPC_PRIORITY='HIGH'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'tag1'; +set spanner.rpc_priority='high'; NEW_CONNECTION; -set autocommit = false; - set spanner.transaction_tag to 'tag1'; + set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; - set spanner.transaction_tag to 'tag1'; + set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'tag1'; +set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'tag1' ; +set spanner.rpc_priority='HIGH' ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 
'tag1' ; +set spanner.rpc_priority='HIGH' ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'tag1' +set spanner.rpc_priority='HIGH' ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'tag1'; +set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'tag1'; +set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; set -spanner.transaction_tag -to -'tag1'; +spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.transaction_tag to 'tag1'; +foo set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1' bar; +set spanner.rpc_priority='HIGH' bar; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.transaction_tag to 'tag1'; +%set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'%; +set spanner.rpc_priority='HIGH'%; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to%'tag1'; +set%spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.transaction_tag to 'tag1'; +_set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'_; +set spanner.rpc_priority='HIGH'_; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to_'tag1'; +set_spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.transaction_tag to 'tag1'; +&set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'&; +set 
spanner.rpc_priority='HIGH'&; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to&'tag1'; +set&spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.transaction_tag to 'tag1'; +$set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'$; +set spanner.rpc_priority='HIGH'$; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to$'tag1'; +set$spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.transaction_tag to 'tag1'; +@set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'@; +set spanner.rpc_priority='HIGH'@; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to@'tag1'; +set@spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.transaction_tag to 'tag1'; +!set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'!; +set spanner.rpc_priority='HIGH'!; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to!'tag1'; +set!spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.transaction_tag to 'tag1'; +*set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'*; +set spanner.rpc_priority='HIGH'*; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to*'tag1'; +set*spanner.rpc_priority='HIGH'; 
NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.transaction_tag to 'tag1'; +(set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'(; +set spanner.rpc_priority='HIGH'(; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to('tag1'; +set(spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.transaction_tag to 'tag1'; +)set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'); +set spanner.rpc_priority='HIGH'); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to)'tag1'; +set)spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.transaction_tag to 'tag1'; +-set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'-; +set spanner.rpc_priority='HIGH'-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to-'tag1'; +set-spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.transaction_tag to 'tag1'; ++set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'+; +set spanner.rpc_priority='HIGH'+; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to+'tag1'; +set+spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.transaction_tag to 'tag1'; +-#set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set 
autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'-#; +set spanner.rpc_priority='HIGH'-#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to-#'tag1'; +set-#spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.transaction_tag to 'tag1'; +/set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'/; +set spanner.rpc_priority='HIGH'/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to/'tag1'; +set/spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.transaction_tag to 'tag1'; +\set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'\; +set spanner.rpc_priority='HIGH'\; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to\'tag1'; +set\spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.transaction_tag to 'tag1'; +?set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'?; +set spanner.rpc_priority='HIGH'?; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to?'tag1'; +set?spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.transaction_tag to 'tag1'; +-/set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'-/; +set spanner.rpc_priority='HIGH'-/; NEW_CONNECTION; -set autocommit = false; 
@EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to-/'tag1'; +set-/spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.transaction_tag to 'tag1'; +/#set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'/#; +set spanner.rpc_priority='HIGH'/#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to/#'tag1'; +set/#spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.transaction_tag to 'tag1'; +/-set spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag1'/-; +set spanner.rpc_priority='HIGH'/-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to/-'tag1'; +set/-spanner.rpc_priority='HIGH'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'tag2'; +set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; -SET SPANNER.TRANSACTION_TAG TO 'TAG2'; +SET SPANNER.RPC_PRIORITY='MEDIUM'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'tag2'; +set spanner.rpc_priority='medium'; NEW_CONNECTION; -set autocommit = false; - set spanner.transaction_tag to 'tag2'; + set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; - set spanner.transaction_tag to 'tag2'; + set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'tag2'; +set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'tag2' ; +set spanner.rpc_priority='MEDIUM' ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'tag2' ; +set spanner.rpc_priority='MEDIUM' ; NEW_CONNECTION; -set 
autocommit = false; -set spanner.transaction_tag to 'tag2' +set spanner.rpc_priority='MEDIUM' ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'tag2'; +set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'tag2'; +set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; set -spanner.transaction_tag -to -'tag2'; +spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.transaction_tag to 'tag2'; +foo set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2' bar; +set spanner.rpc_priority='MEDIUM' bar; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.transaction_tag to 'tag2'; +%set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'%; +set spanner.rpc_priority='MEDIUM'%; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to%'tag2'; +set%spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.transaction_tag to 'tag2'; +_set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'_; +set spanner.rpc_priority='MEDIUM'_; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to_'tag2'; +set_spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.transaction_tag to 'tag2'; +&set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'&; +set spanner.rpc_priority='MEDIUM'&; 
NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to&'tag2'; +set&spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.transaction_tag to 'tag2'; +$set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'$; +set spanner.rpc_priority='MEDIUM'$; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to$'tag2'; +set$spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.transaction_tag to 'tag2'; +@set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'@; +set spanner.rpc_priority='MEDIUM'@; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to@'tag2'; +set@spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.transaction_tag to 'tag2'; +!set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'!; +set spanner.rpc_priority='MEDIUM'!; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to!'tag2'; +set!spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.transaction_tag to 'tag2'; +*set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'*; +set spanner.rpc_priority='MEDIUM'*; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to*'tag2'; +set*spanner.rpc_priority='MEDIUM'; 
NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.transaction_tag to 'tag2'; +(set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'(; +set spanner.rpc_priority='MEDIUM'(; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to('tag2'; +set(spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.transaction_tag to 'tag2'; +)set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'); +set spanner.rpc_priority='MEDIUM'); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to)'tag2'; +set)spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.transaction_tag to 'tag2'; +-set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'-; +set spanner.rpc_priority='MEDIUM'-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to-'tag2'; +set-spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.transaction_tag to 'tag2'; ++set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'+; +set spanner.rpc_priority='MEDIUM'+; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to+'tag2'; +set+spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.transaction_tag to 'tag2'; +-#set spanner.rpc_priority='MEDIUM'; 
NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'-#; +set spanner.rpc_priority='MEDIUM'-#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to-#'tag2'; +set-#spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.transaction_tag to 'tag2'; +/set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'/; +set spanner.rpc_priority='MEDIUM'/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to/'tag2'; +set/spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.transaction_tag to 'tag2'; +\set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'\; +set spanner.rpc_priority='MEDIUM'\; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to\'tag2'; +set\spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.transaction_tag to 'tag2'; +?set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'?; +set spanner.rpc_priority='MEDIUM'?; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to?'tag2'; +set?spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.transaction_tag to 'tag2'; +-/set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'-/; +set 
spanner.rpc_priority='MEDIUM'-/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to-/'tag2'; +set-/spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.transaction_tag to 'tag2'; +/#set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'/#; +set spanner.rpc_priority='MEDIUM'/#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to/#'tag2'; +set/#spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.transaction_tag to 'tag2'; +/-set spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'tag2'/-; +set spanner.rpc_priority='MEDIUM'/-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to/-'tag2'; +set/-spanner.rpc_priority='MEDIUM'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to ''; +set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; -SET SPANNER.TRANSACTION_TAG TO ''; +SET SPANNER.RPC_PRIORITY='LOW'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to ''; +set spanner.rpc_priority='low'; NEW_CONNECTION; -set autocommit = false; - set spanner.transaction_tag to ''; + set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; - set spanner.transaction_tag to ''; + set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to ''; +set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to '' ; +set spanner.rpc_priority='LOW' ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to '' ; +set 
spanner.rpc_priority='LOW' ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to '' +set spanner.rpc_priority='LOW' ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to ''; +set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to ''; +set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; set -spanner.transaction_tag -to -''; +spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.transaction_tag to ''; +foo set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to '' bar; +set spanner.rpc_priority='LOW' bar; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.transaction_tag to ''; +%set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''%; +set spanner.rpc_priority='LOW'%; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to%''; +set%spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.transaction_tag to ''; +_set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''_; +set spanner.rpc_priority='LOW'_; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to_''; +set_spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.transaction_tag to ''; +&set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''&; +set spanner.rpc_priority='LOW'&; NEW_CONNECTION; -set autocommit = false; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to&''; +set&spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.transaction_tag to ''; +$set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''$; +set spanner.rpc_priority='LOW'$; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to$''; +set$spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.transaction_tag to ''; +@set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''@; +set spanner.rpc_priority='LOW'@; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to@''; +set@spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.transaction_tag to ''; +!set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''!; +set spanner.rpc_priority='LOW'!; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to!''; +set!spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.transaction_tag to ''; +*set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''*; +set spanner.rpc_priority='LOW'*; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to*''; +set*spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.transaction_tag to ''; +(set spanner.rpc_priority='LOW'; 
NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''(; +set spanner.rpc_priority='LOW'(; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to(''; +set(spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.transaction_tag to ''; +)set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''); +set spanner.rpc_priority='LOW'); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to)''; +set)spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.transaction_tag to ''; +-set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''-; +set spanner.rpc_priority='LOW'-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to-''; +set-spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.transaction_tag to ''; ++set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''+; +set spanner.rpc_priority='LOW'+; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to+''; +set+spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.transaction_tag to ''; +-#set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''-#; +set spanner.rpc_priority='LOW'-#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.transaction_tag to-#''; +set-#spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.transaction_tag to ''; +/set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''/; +set spanner.rpc_priority='LOW'/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to/''; +set/spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.transaction_tag to ''; +\set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''\; +set spanner.rpc_priority='LOW'\; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to\''; +set\spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.transaction_tag to ''; +?set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''?; +set spanner.rpc_priority='LOW'?; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to?''; +set?spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.transaction_tag to ''; +-/set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''-/; +set spanner.rpc_priority='LOW'-/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to-/''; +set-/spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.transaction_tag to ''; +/#set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set 
autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''/#; +set spanner.rpc_priority='LOW'/#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to/#''; +set/#spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.transaction_tag to ''; +/-set spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to ''/-; +set spanner.rpc_priority='LOW'/-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to/-''; +set/-spanner.rpc_priority='LOW'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'test_tag'; +set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; -SET SPANNER.TRANSACTION_TAG TO 'TEST_TAG'; +SET SPANNER.RPC_PRIORITY='NULL'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'test_tag'; +set spanner.rpc_priority='null'; NEW_CONNECTION; -set autocommit = false; - set spanner.transaction_tag to 'test_tag'; + set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; - set spanner.transaction_tag to 'test_tag'; + set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'test_tag'; +set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'test_tag' ; +set spanner.rpc_priority='NULL' ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'test_tag' ; +set spanner.rpc_priority='NULL' ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'test_tag' +set spanner.rpc_priority='NULL' ; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 'test_tag'; +set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; -set spanner.transaction_tag to 
'test_tag'; +set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; set -spanner.transaction_tag -to -'test_tag'; +spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.transaction_tag to 'test_tag'; +foo set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag' bar; +set spanner.rpc_priority='NULL' bar; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.transaction_tag to 'test_tag'; +%set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'%; +set spanner.rpc_priority='NULL'%; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to%'test_tag'; +set%spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.transaction_tag to 'test_tag'; +_set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'_; +set spanner.rpc_priority='NULL'_; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to_'test_tag'; +set_spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.transaction_tag to 'test_tag'; +&set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'&; +set spanner.rpc_priority='NULL'&; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to&'test_tag'; +set&spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.transaction_tag to 
'test_tag'; +$set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'$; +set spanner.rpc_priority='NULL'$; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to$'test_tag'; +set$spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.transaction_tag to 'test_tag'; +@set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'@; +set spanner.rpc_priority='NULL'@; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to@'test_tag'; +set@spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.transaction_tag to 'test_tag'; +!set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'!; +set spanner.rpc_priority='NULL'!; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to!'test_tag'; +set!spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.transaction_tag to 'test_tag'; +*set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'*; +set spanner.rpc_priority='NULL'*; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to*'test_tag'; +set*spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.transaction_tag to 'test_tag'; +(set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.transaction_tag to 'test_tag'(; +set spanner.rpc_priority='NULL'(; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to('test_tag'; +set(spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.transaction_tag to 'test_tag'; +)set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'); +set spanner.rpc_priority='NULL'); NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to)'test_tag'; +set)spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.transaction_tag to 'test_tag'; +-set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'-; +set spanner.rpc_priority='NULL'-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to-'test_tag'; +set-spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.transaction_tag to 'test_tag'; ++set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'+; +set spanner.rpc_priority='NULL'+; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to+'test_tag'; +set+spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.transaction_tag to 'test_tag'; +-#set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'-#; +set spanner.rpc_priority='NULL'-#; NEW_CONNECTION; -set autocommit = false; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to-#'test_tag'; +set-#spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.transaction_tag to 'test_tag'; +/set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'/; +set spanner.rpc_priority='NULL'/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to/'test_tag'; +set/spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.transaction_tag to 'test_tag'; +\set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'\; +set spanner.rpc_priority='NULL'\; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to\'test_tag'; +set\spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.transaction_tag to 'test_tag'; +?set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'?; +set spanner.rpc_priority='NULL'?; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to?'test_tag'; +set?spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.transaction_tag to 'test_tag'; +-/set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'-/; +set spanner.rpc_priority='NULL'-/; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to-/'test_tag'; +set-/spanner.rpc_priority='NULL'; NEW_CONNECTION; -set 
autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.transaction_tag to 'test_tag'; +/#set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'/#; +set spanner.rpc_priority='NULL'/#; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to/#'test_tag'; +set/#spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.transaction_tag to 'test_tag'; +/-set spanner.rpc_priority='NULL'; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to 'test_tag'/-; +set spanner.rpc_priority='NULL'/-; NEW_CONNECTION; -set autocommit = false; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.transaction_tag to/-'test_tag'; +set/-spanner.rpc_priority='NULL'; NEW_CONNECTION; -set spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; -SET SPANNER.RPC_PRIORITY='HIGH'; +SET SPANNER.RPC_PRIORITY TO 'HIGH'; NEW_CONNECTION; -set spanner.rpc_priority='high'; +set spanner.rpc_priority to 'high'; NEW_CONNECTION; - set spanner.rpc_priority='HIGH'; + set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; - set spanner.rpc_priority='HIGH'; + set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; -set spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; -set spanner.rpc_priority='HIGH' ; +set spanner.rpc_priority to 'HIGH' ; NEW_CONNECTION; -set spanner.rpc_priority='HIGH' ; +set spanner.rpc_priority to 'HIGH' ; NEW_CONNECTION; -set spanner.rpc_priority='HIGH' +set spanner.rpc_priority to 'HIGH' ; NEW_CONNECTION; -set spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; -set spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; set -spanner.rpc_priority='HIGH'; +spanner.rpc_priority +to +'HIGH'; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -foo set spanner.rpc_priority='HIGH'; +foo set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH' bar; +set spanner.rpc_priority to 'HIGH' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.rpc_priority='HIGH'; +%set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'%; +set spanner.rpc_priority to 'HIGH'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to%'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.rpc_priority='HIGH'; +_set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'_; +set spanner.rpc_priority to 'HIGH'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to_'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.rpc_priority='HIGH'; +&set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'&; +set spanner.rpc_priority to 'HIGH'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to&'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.rpc_priority='HIGH'; +$set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'$; +set spanner.rpc_priority to 'HIGH'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to$'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.rpc_priority='HIGH'; +@set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'@; +set spanner.rpc_priority to 'HIGH'@; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set@spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to@'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.rpc_priority='HIGH'; +!set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'!; +set spanner.rpc_priority to 'HIGH'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to!'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.rpc_priority='HIGH'; +*set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'*; +set spanner.rpc_priority to 'HIGH'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to*'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.rpc_priority='HIGH'; +(set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'(; +set spanner.rpc_priority to 'HIGH'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to('HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.rpc_priority='HIGH'; +)set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'); +set spanner.rpc_priority to 'HIGH'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to)'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.rpc_priority='HIGH'; +-set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'-; +set spanner.rpc_priority to 'HIGH'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to-'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set 
spanner.rpc_priority='HIGH'; ++set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'+; +set spanner.rpc_priority to 'HIGH'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to+'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.rpc_priority='HIGH'; +-#set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'-#; +set spanner.rpc_priority to 'HIGH'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to-#'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.rpc_priority='HIGH'; +/set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'/; +set spanner.rpc_priority to 'HIGH'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to/'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.rpc_priority='HIGH'; +\set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'\; +set spanner.rpc_priority to 'HIGH'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to\'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.rpc_priority='HIGH'; +?set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'?; +set spanner.rpc_priority to 'HIGH'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to?'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.rpc_priority='HIGH'; +-/set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.rpc_priority='HIGH'-/; +set spanner.rpc_priority to 'HIGH'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to-/'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.rpc_priority='HIGH'; +/#set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'/#; +set spanner.rpc_priority to 'HIGH'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to/#'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.rpc_priority='HIGH'; +/-set spanner.rpc_priority to 'HIGH'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='HIGH'/-; +set spanner.rpc_priority to 'HIGH'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-spanner.rpc_priority='HIGH'; +set spanner.rpc_priority to/-'HIGH'; NEW_CONNECTION; -set spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; -SET SPANNER.RPC_PRIORITY='MEDIUM'; +SET SPANNER.RPC_PRIORITY TO 'MEDIUM'; NEW_CONNECTION; -set spanner.rpc_priority='medium'; +set spanner.rpc_priority to 'medium'; NEW_CONNECTION; - set spanner.rpc_priority='MEDIUM'; + set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; - set spanner.rpc_priority='MEDIUM'; + set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; -set spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; -set spanner.rpc_priority='MEDIUM' ; +set spanner.rpc_priority to 'MEDIUM' ; NEW_CONNECTION; -set spanner.rpc_priority='MEDIUM' ; +set spanner.rpc_priority to 'MEDIUM' ; NEW_CONNECTION; -set spanner.rpc_priority='MEDIUM' +set spanner.rpc_priority to 'MEDIUM' ; NEW_CONNECTION; -set spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; -set spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; set 
-spanner.rpc_priority='MEDIUM'; +spanner.rpc_priority +to +'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.rpc_priority='MEDIUM'; +foo set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM' bar; +set spanner.rpc_priority to 'MEDIUM' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.rpc_priority='MEDIUM'; +%set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'%; +set spanner.rpc_priority to 'MEDIUM'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to%'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.rpc_priority='MEDIUM'; +_set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'_; +set spanner.rpc_priority to 'MEDIUM'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to_'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.rpc_priority='MEDIUM'; +&set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'&; +set spanner.rpc_priority to 'MEDIUM'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to&'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.rpc_priority='MEDIUM'; +$set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'$; +set spanner.rpc_priority to 'MEDIUM'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to$'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.rpc_priority='MEDIUM'; +@set spanner.rpc_priority to 'MEDIUM'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'@; +set spanner.rpc_priority to 'MEDIUM'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to@'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.rpc_priority='MEDIUM'; +!set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'!; +set spanner.rpc_priority to 'MEDIUM'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to!'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.rpc_priority='MEDIUM'; +*set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'*; +set spanner.rpc_priority to 'MEDIUM'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to*'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.rpc_priority='MEDIUM'; +(set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'(; +set spanner.rpc_priority to 'MEDIUM'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to('MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.rpc_priority='MEDIUM'; +)set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'); +set spanner.rpc_priority to 'MEDIUM'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to)'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.rpc_priority='MEDIUM'; +-set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'-; 
+set spanner.rpc_priority to 'MEDIUM'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to-'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.rpc_priority='MEDIUM'; ++set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'+; +set spanner.rpc_priority to 'MEDIUM'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to+'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.rpc_priority='MEDIUM'; +-#set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'-#; +set spanner.rpc_priority to 'MEDIUM'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to-#'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.rpc_priority='MEDIUM'; +/set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'/; +set spanner.rpc_priority to 'MEDIUM'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to/'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.rpc_priority='MEDIUM'; +\set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'\; +set spanner.rpc_priority to 'MEDIUM'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to\'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.rpc_priority='MEDIUM'; +?set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'?; +set spanner.rpc_priority to 'MEDIUM'?; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set?spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to?'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.rpc_priority='MEDIUM'; +-/set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'-/; +set spanner.rpc_priority to 'MEDIUM'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to-/'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.rpc_priority='MEDIUM'; +/#set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'/#; +set spanner.rpc_priority to 'MEDIUM'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to/#'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.rpc_priority='MEDIUM'; +/-set spanner.rpc_priority to 'MEDIUM'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='MEDIUM'/-; +set spanner.rpc_priority to 'MEDIUM'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-spanner.rpc_priority='MEDIUM'; +set spanner.rpc_priority to/-'MEDIUM'; NEW_CONNECTION; -set spanner.rpc_priority='LOW'; +set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; -SET SPANNER.RPC_PRIORITY='LOW'; +SET SPANNER.RPC_PRIORITY TO 'LOW'; NEW_CONNECTION; -set spanner.rpc_priority='low'; +set spanner.rpc_priority to 'low'; NEW_CONNECTION; - set spanner.rpc_priority='LOW'; + set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; - set spanner.rpc_priority='LOW'; + set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; -set spanner.rpc_priority='LOW'; +set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; -set spanner.rpc_priority='LOW' ; +set spanner.rpc_priority to 'LOW' ; NEW_CONNECTION; -set spanner.rpc_priority='LOW' ; +set spanner.rpc_priority to 'LOW' ; NEW_CONNECTION; -set spanner.rpc_priority='LOW' +set 
spanner.rpc_priority to 'LOW' ; NEW_CONNECTION; -set spanner.rpc_priority='LOW'; +set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; -set spanner.rpc_priority='LOW'; +set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; set -spanner.rpc_priority='LOW'; +spanner.rpc_priority +to +'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.rpc_priority='LOW'; +foo set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW' bar; +set spanner.rpc_priority to 'LOW' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.rpc_priority='LOW'; +%set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'%; +set spanner.rpc_priority to 'LOW'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%spanner.rpc_priority='LOW'; +set spanner.rpc_priority to%'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.rpc_priority='LOW'; +_set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'_; +set spanner.rpc_priority to 'LOW'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_spanner.rpc_priority='LOW'; +set spanner.rpc_priority to_'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.rpc_priority='LOW'; +&set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'&; +set spanner.rpc_priority to 'LOW'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&spanner.rpc_priority='LOW'; +set spanner.rpc_priority to&'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.rpc_priority='LOW'; +$set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'$; +set spanner.rpc_priority to 'LOW'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$spanner.rpc_priority='LOW'; +set spanner.rpc_priority to$'LOW'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.rpc_priority='LOW'; +@set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'@; +set spanner.rpc_priority to 'LOW'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@spanner.rpc_priority='LOW'; +set spanner.rpc_priority to@'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.rpc_priority='LOW'; +!set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'!; +set spanner.rpc_priority to 'LOW'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!spanner.rpc_priority='LOW'; +set spanner.rpc_priority to!'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.rpc_priority='LOW'; +*set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'*; +set spanner.rpc_priority to 'LOW'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*spanner.rpc_priority='LOW'; +set spanner.rpc_priority to*'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.rpc_priority='LOW'; +(set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'(; +set spanner.rpc_priority to 'LOW'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(spanner.rpc_priority='LOW'; +set spanner.rpc_priority to('LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.rpc_priority='LOW'; +)set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'); +set spanner.rpc_priority to 'LOW'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)spanner.rpc_priority='LOW'; +set spanner.rpc_priority to)'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.rpc_priority='LOW'; +-set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.rpc_priority='LOW'-; +set spanner.rpc_priority to 'LOW'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-spanner.rpc_priority='LOW'; +set spanner.rpc_priority to-'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.rpc_priority='LOW'; ++set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'+; +set spanner.rpc_priority to 'LOW'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+spanner.rpc_priority='LOW'; +set spanner.rpc_priority to+'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.rpc_priority='LOW'; +-#set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'-#; +set spanner.rpc_priority to 'LOW'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#spanner.rpc_priority='LOW'; +set spanner.rpc_priority to-#'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.rpc_priority='LOW'; +/set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'/; +set spanner.rpc_priority to 'LOW'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/spanner.rpc_priority='LOW'; +set spanner.rpc_priority to/'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.rpc_priority='LOW'; +\set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'\; +set spanner.rpc_priority to 'LOW'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\spanner.rpc_priority='LOW'; +set spanner.rpc_priority to\'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.rpc_priority='LOW'; +?set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'?; +set spanner.rpc_priority to 'LOW'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?spanner.rpc_priority='LOW'; +set 
spanner.rpc_priority to?'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.rpc_priority='LOW'; +-/set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'-/; +set spanner.rpc_priority to 'LOW'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/spanner.rpc_priority='LOW'; +set spanner.rpc_priority to-/'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.rpc_priority='LOW'; +/#set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'/#; +set spanner.rpc_priority to 'LOW'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#spanner.rpc_priority='LOW'; +set spanner.rpc_priority to/#'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.rpc_priority='LOW'; +/-set spanner.rpc_priority to 'LOW'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='LOW'/-; +set spanner.rpc_priority to 'LOW'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-spanner.rpc_priority='LOW'; +set spanner.rpc_priority to/-'LOW'; NEW_CONNECTION; -set spanner.rpc_priority='NULL'; +set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; -SET SPANNER.RPC_PRIORITY='NULL'; +SET SPANNER.RPC_PRIORITY TO 'NULL'; NEW_CONNECTION; -set spanner.rpc_priority='null'; +set spanner.rpc_priority to 'null'; NEW_CONNECTION; - set spanner.rpc_priority='NULL'; + set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; - set spanner.rpc_priority='NULL'; + set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; -set spanner.rpc_priority='NULL'; +set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; -set spanner.rpc_priority='NULL' ; +set spanner.rpc_priority to 'NULL' ; NEW_CONNECTION; -set spanner.rpc_priority='NULL' ; +set spanner.rpc_priority to 'NULL' ; NEW_CONNECTION; -set spanner.rpc_priority='NULL' +set spanner.rpc_priority to 'NULL' ; NEW_CONNECTION; -set spanner.rpc_priority='NULL'; +set 
spanner.rpc_priority to 'NULL'; NEW_CONNECTION; -set spanner.rpc_priority='NULL'; +set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; set -spanner.rpc_priority='NULL'; +spanner.rpc_priority +to +'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.rpc_priority='NULL'; +foo set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL' bar; +set spanner.rpc_priority to 'NULL' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.rpc_priority='NULL'; +%set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'%; +set spanner.rpc_priority to 'NULL'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%spanner.rpc_priority='NULL'; +set spanner.rpc_priority to%'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.rpc_priority='NULL'; +_set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'_; +set spanner.rpc_priority to 'NULL'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_spanner.rpc_priority='NULL'; +set spanner.rpc_priority to_'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.rpc_priority='NULL'; +&set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'&; +set spanner.rpc_priority to 'NULL'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&spanner.rpc_priority='NULL'; +set spanner.rpc_priority to&'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.rpc_priority='NULL'; +$set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'$; +set spanner.rpc_priority to 'NULL'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$spanner.rpc_priority='NULL'; +set spanner.rpc_priority to$'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-@set spanner.rpc_priority='NULL'; +@set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'@; +set spanner.rpc_priority to 'NULL'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@spanner.rpc_priority='NULL'; +set spanner.rpc_priority to@'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.rpc_priority='NULL'; +!set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'!; +set spanner.rpc_priority to 'NULL'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!spanner.rpc_priority='NULL'; +set spanner.rpc_priority to!'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.rpc_priority='NULL'; +*set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'*; +set spanner.rpc_priority to 'NULL'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*spanner.rpc_priority='NULL'; +set spanner.rpc_priority to*'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.rpc_priority='NULL'; +(set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'(; +set spanner.rpc_priority to 'NULL'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(spanner.rpc_priority='NULL'; +set spanner.rpc_priority to('NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.rpc_priority='NULL'; +)set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'); +set spanner.rpc_priority to 'NULL'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)spanner.rpc_priority='NULL'; +set spanner.rpc_priority to)'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.rpc_priority='NULL'; +-set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.rpc_priority='NULL'-; +set spanner.rpc_priority to 'NULL'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-spanner.rpc_priority='NULL'; +set spanner.rpc_priority to-'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.rpc_priority='NULL'; ++set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'+; +set spanner.rpc_priority to 'NULL'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+spanner.rpc_priority='NULL'; +set spanner.rpc_priority to+'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.rpc_priority='NULL'; +-#set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'-#; +set spanner.rpc_priority to 'NULL'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#spanner.rpc_priority='NULL'; +set spanner.rpc_priority to-#'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.rpc_priority='NULL'; +/set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'/; +set spanner.rpc_priority to 'NULL'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/spanner.rpc_priority='NULL'; +set spanner.rpc_priority to/'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.rpc_priority='NULL'; +\set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'\; +set spanner.rpc_priority to 'NULL'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\spanner.rpc_priority='NULL'; +set spanner.rpc_priority to\'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.rpc_priority='NULL'; +?set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'?; +set spanner.rpc_priority to 'NULL'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set?spanner.rpc_priority='NULL'; +set spanner.rpc_priority to?'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.rpc_priority='NULL'; +-/set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'-/; +set spanner.rpc_priority to 'NULL'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/spanner.rpc_priority='NULL'; +set spanner.rpc_priority to-/'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.rpc_priority='NULL'; +/#set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'/#; +set spanner.rpc_priority to 'NULL'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#spanner.rpc_priority='NULL'; +set spanner.rpc_priority to/#'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.rpc_priority='NULL'; +/-set spanner.rpc_priority to 'NULL'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority='NULL'/-; +set spanner.rpc_priority to 'NULL'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-spanner.rpc_priority='NULL'; +set spanner.rpc_priority to/-'NULL'; NEW_CONNECTION; -set spanner.rpc_priority to 'HIGH'; +set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; -SET SPANNER.RPC_PRIORITY TO 'HIGH'; +SET SPANNER.SAVEPOINT_SUPPORT='ENABLED'; NEW_CONNECTION; -set spanner.rpc_priority to 'high'; +set spanner.savepoint_support='enabled'; NEW_CONNECTION; - set spanner.rpc_priority to 'HIGH'; + set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; - set spanner.rpc_priority to 'HIGH'; + set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; -set spanner.rpc_priority to 'HIGH'; +set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; -set spanner.rpc_priority to 'HIGH' ; +set spanner.savepoint_support='ENABLED' ; NEW_CONNECTION; -set spanner.rpc_priority to 'HIGH' ; +set spanner.savepoint_support='ENABLED' ; NEW_CONNECTION; -set 
spanner.rpc_priority to 'HIGH' +set spanner.savepoint_support='ENABLED' ; NEW_CONNECTION; -set spanner.rpc_priority to 'HIGH'; +set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; -set spanner.rpc_priority to 'HIGH'; +set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; set -spanner.rpc_priority -to -'HIGH'; +spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.rpc_priority to 'HIGH'; +foo set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH' bar; +set spanner.savepoint_support='ENABLED' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.rpc_priority to 'HIGH'; +%set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'%; +set spanner.savepoint_support='ENABLED'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to%'HIGH'; +set%spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.rpc_priority to 'HIGH'; +_set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'_; +set spanner.savepoint_support='ENABLED'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to_'HIGH'; +set_spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.rpc_priority to 'HIGH'; +&set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'&; +set spanner.savepoint_support='ENABLED'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to&'HIGH'; +set&spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.rpc_priority to 'HIGH'; +$set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
-set spanner.rpc_priority to 'HIGH'$; +set spanner.savepoint_support='ENABLED'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to$'HIGH'; +set$spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.rpc_priority to 'HIGH'; +@set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'@; +set spanner.savepoint_support='ENABLED'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to@'HIGH'; +set@spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.rpc_priority to 'HIGH'; +!set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'!; +set spanner.savepoint_support='ENABLED'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to!'HIGH'; +set!spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.rpc_priority to 'HIGH'; +*set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'*; +set spanner.savepoint_support='ENABLED'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to*'HIGH'; +set*spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.rpc_priority to 'HIGH'; +(set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'(; +set spanner.savepoint_support='ENABLED'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to('HIGH'; +set(spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.rpc_priority to 'HIGH'; +)set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 
'HIGH'); +set spanner.savepoint_support='ENABLED'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to)'HIGH'; +set)spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.rpc_priority to 'HIGH'; +-set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'-; +set spanner.savepoint_support='ENABLED'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to-'HIGH'; +set-spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.rpc_priority to 'HIGH'; ++set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'+; +set spanner.savepoint_support='ENABLED'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to+'HIGH'; +set+spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.rpc_priority to 'HIGH'; +-#set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'-#; +set spanner.savepoint_support='ENABLED'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to-#'HIGH'; +set-#spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.rpc_priority to 'HIGH'; +/set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'/; +set spanner.savepoint_support='ENABLED'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to/'HIGH'; +set/spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.rpc_priority to 'HIGH'; +\set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'\; +set 
spanner.savepoint_support='ENABLED'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to\'HIGH'; +set\spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.rpc_priority to 'HIGH'; +?set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'?; +set spanner.savepoint_support='ENABLED'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to?'HIGH'; +set?spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.rpc_priority to 'HIGH'; +-/set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'-/; +set spanner.savepoint_support='ENABLED'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to-/'HIGH'; +set-/spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.rpc_priority to 'HIGH'; +/#set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'/#; +set spanner.savepoint_support='ENABLED'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to/#'HIGH'; +set/#spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.rpc_priority to 'HIGH'; +/-set spanner.savepoint_support='ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'HIGH'/-; +set spanner.savepoint_support='ENABLED'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to/-'HIGH'; +set/-spanner.savepoint_support='ENABLED'; NEW_CONNECTION; -set spanner.rpc_priority to 'MEDIUM'; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; -SET SPANNER.RPC_PRIORITY TO 'MEDIUM'; +SET SPANNER.SAVEPOINT_SUPPORT='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; 
-set spanner.rpc_priority to 'medium'; +set spanner.savepoint_support='fail_after_rollback'; NEW_CONNECTION; - set spanner.rpc_priority to 'MEDIUM'; + set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; - set spanner.rpc_priority to 'MEDIUM'; + set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; -set spanner.rpc_priority to 'MEDIUM'; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; -set spanner.rpc_priority to 'MEDIUM' ; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK' ; NEW_CONNECTION; -set spanner.rpc_priority to 'MEDIUM' ; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK' ; NEW_CONNECTION; -set spanner.rpc_priority to 'MEDIUM' +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK' ; NEW_CONNECTION; -set spanner.rpc_priority to 'MEDIUM'; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; -set spanner.rpc_priority to 'MEDIUM'; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; set -spanner.rpc_priority -to -'MEDIUM'; +spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.rpc_priority to 'MEDIUM'; +foo set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM' bar; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.rpc_priority to 'MEDIUM'; +%set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'%; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to%'MEDIUM'; +set%spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.rpc_priority to 'MEDIUM'; +_set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'_; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to_'MEDIUM'; +set_spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.rpc_priority to 'MEDIUM'; +&set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'&; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to&'MEDIUM'; +set&spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.rpc_priority to 'MEDIUM'; +$set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'$; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to$'MEDIUM'; +set$spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.rpc_priority to 'MEDIUM'; +@set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'@; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to@'MEDIUM'; +set@spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.rpc_priority to 'MEDIUM'; +!set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'!; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to!'MEDIUM'; +set!spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.rpc_priority to 'MEDIUM'; +*set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'*; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to*'MEDIUM'; +set*spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.rpc_priority to 'MEDIUM'; +(set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'(; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to('MEDIUM'; +set(spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.rpc_priority to 'MEDIUM'; +)set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'); +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to)'MEDIUM'; +set)spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.rpc_priority to 'MEDIUM'; +-set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'-; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to-'MEDIUM'; +set-spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.rpc_priority to 'MEDIUM'; ++set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'+; +set 
spanner.savepoint_support='FAIL_AFTER_ROLLBACK'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to+'MEDIUM'; +set+spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.rpc_priority to 'MEDIUM'; +-#set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'-#; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to-#'MEDIUM'; +set-#spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.rpc_priority to 'MEDIUM'; +/set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'/; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to/'MEDIUM'; +set/spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.rpc_priority to 'MEDIUM'; +\set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'\; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to\'MEDIUM'; +set\spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.rpc_priority to 'MEDIUM'; +?set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'?; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to?'MEDIUM'; +set?spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT 
--/set spanner.rpc_priority to 'MEDIUM'; +-/set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'-/; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to-/'MEDIUM'; +set-/spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.rpc_priority to 'MEDIUM'; +/#set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'/#; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to/#'MEDIUM'; +set/#spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.rpc_priority to 'MEDIUM'; +/-set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'MEDIUM'/-; +set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to/-'MEDIUM'; +set/-spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; -set spanner.rpc_priority to 'LOW'; +set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; -SET SPANNER.RPC_PRIORITY TO 'LOW'; +SET SPANNER.SAVEPOINT_SUPPORT='DISABLED'; NEW_CONNECTION; -set spanner.rpc_priority to 'low'; +set spanner.savepoint_support='disabled'; NEW_CONNECTION; - set spanner.rpc_priority to 'LOW'; + set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; - set spanner.rpc_priority to 'LOW'; + set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; -set spanner.rpc_priority to 'LOW'; +set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; -set spanner.rpc_priority to 'LOW' ; +set spanner.savepoint_support='DISABLED' ; NEW_CONNECTION; -set spanner.rpc_priority to 'LOW' ; 
+set spanner.savepoint_support='DISABLED' ; NEW_CONNECTION; -set spanner.rpc_priority to 'LOW' +set spanner.savepoint_support='DISABLED' ; NEW_CONNECTION; -set spanner.rpc_priority to 'LOW'; +set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; -set spanner.rpc_priority to 'LOW'; +set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; set -spanner.rpc_priority -to -'LOW'; +spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.rpc_priority to 'LOW'; +foo set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW' bar; +set spanner.savepoint_support='DISABLED' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.rpc_priority to 'LOW'; +%set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'%; +set spanner.savepoint_support='DISABLED'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to%'LOW'; +set%spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.rpc_priority to 'LOW'; +_set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'_; +set spanner.savepoint_support='DISABLED'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to_'LOW'; +set_spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.rpc_priority to 'LOW'; +&set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'&; +set spanner.savepoint_support='DISABLED'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to&'LOW'; +set&spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.rpc_priority to 'LOW'; +$set 
spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'$; +set spanner.savepoint_support='DISABLED'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to$'LOW'; +set$spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.rpc_priority to 'LOW'; +@set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'@; +set spanner.savepoint_support='DISABLED'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to@'LOW'; +set@spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.rpc_priority to 'LOW'; +!set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'!; +set spanner.savepoint_support='DISABLED'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to!'LOW'; +set!spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.rpc_priority to 'LOW'; +*set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'*; +set spanner.savepoint_support='DISABLED'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to*'LOW'; +set*spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.rpc_priority to 'LOW'; +(set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'(; +set spanner.savepoint_support='DISABLED'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to('LOW'; +set(spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.rpc_priority to 'LOW'; +)set 
spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'); +set spanner.savepoint_support='DISABLED'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to)'LOW'; +set)spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.rpc_priority to 'LOW'; +-set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'-; +set spanner.savepoint_support='DISABLED'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to-'LOW'; +set-spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.rpc_priority to 'LOW'; ++set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'+; +set spanner.savepoint_support='DISABLED'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to+'LOW'; +set+spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.rpc_priority to 'LOW'; +-#set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'-#; +set spanner.savepoint_support='DISABLED'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to-#'LOW'; +set-#spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.rpc_priority to 'LOW'; +/set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'/; +set spanner.savepoint_support='DISABLED'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to/'LOW'; +set/spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.rpc_priority to 'LOW'; +\set 
spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'\; +set spanner.savepoint_support='DISABLED'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to\'LOW'; +set\spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.rpc_priority to 'LOW'; +?set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'?; +set spanner.savepoint_support='DISABLED'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to?'LOW'; +set?spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.rpc_priority to 'LOW'; +-/set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'-/; +set spanner.savepoint_support='DISABLED'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to-/'LOW'; +set-/spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.rpc_priority to 'LOW'; +/#set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'/#; +set spanner.savepoint_support='DISABLED'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to/#'LOW'; +set/#spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.rpc_priority to 'LOW'; +/-set spanner.savepoint_support='DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'LOW'/-; +set spanner.savepoint_support='DISABLED'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to/-'LOW'; +set/-spanner.savepoint_support='DISABLED'; NEW_CONNECTION; -set spanner.rpc_priority to 'NULL'; +set spanner.savepoint_support to 'ENABLED'; 
NEW_CONNECTION; -SET SPANNER.RPC_PRIORITY TO 'NULL'; +SET SPANNER.SAVEPOINT_SUPPORT TO 'ENABLED'; NEW_CONNECTION; -set spanner.rpc_priority to 'null'; +set spanner.savepoint_support to 'enabled'; NEW_CONNECTION; - set spanner.rpc_priority to 'NULL'; + set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; - set spanner.rpc_priority to 'NULL'; + set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; -set spanner.rpc_priority to 'NULL'; +set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; -set spanner.rpc_priority to 'NULL' ; +set spanner.savepoint_support to 'ENABLED' ; NEW_CONNECTION; -set spanner.rpc_priority to 'NULL' ; +set spanner.savepoint_support to 'ENABLED' ; NEW_CONNECTION; -set spanner.rpc_priority to 'NULL' +set spanner.savepoint_support to 'ENABLED' ; NEW_CONNECTION; -set spanner.rpc_priority to 'NULL'; +set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; -set spanner.rpc_priority to 'NULL'; +set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; set -spanner.rpc_priority +spanner.savepoint_support to -'NULL'; +'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.rpc_priority to 'NULL'; +foo set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL' bar; +set spanner.savepoint_support to 'ENABLED' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.rpc_priority to 'NULL'; +%set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'%; +set spanner.savepoint_support to 'ENABLED'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to%'NULL'; +set spanner.savepoint_support to%'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.rpc_priority to 'NULL'; +_set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'_; +set 
spanner.savepoint_support to 'ENABLED'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to_'NULL'; +set spanner.savepoint_support to_'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.rpc_priority to 'NULL'; +&set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'&; +set spanner.savepoint_support to 'ENABLED'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to&'NULL'; +set spanner.savepoint_support to&'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.rpc_priority to 'NULL'; +$set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'$; +set spanner.savepoint_support to 'ENABLED'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to$'NULL'; +set spanner.savepoint_support to$'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.rpc_priority to 'NULL'; +@set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'@; +set spanner.savepoint_support to 'ENABLED'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to@'NULL'; +set spanner.savepoint_support to@'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.rpc_priority to 'NULL'; +!set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'!; +set spanner.savepoint_support to 'ENABLED'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to!'NULL'; +set spanner.savepoint_support to!'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.rpc_priority to 'NULL'; +*set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority 
to 'NULL'*; +set spanner.savepoint_support to 'ENABLED'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to*'NULL'; +set spanner.savepoint_support to*'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.rpc_priority to 'NULL'; +(set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'(; +set spanner.savepoint_support to 'ENABLED'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to('NULL'; +set spanner.savepoint_support to('ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.rpc_priority to 'NULL'; +)set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'); +set spanner.savepoint_support to 'ENABLED'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to)'NULL'; +set spanner.savepoint_support to)'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.rpc_priority to 'NULL'; +-set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'-; +set spanner.savepoint_support to 'ENABLED'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to-'NULL'; +set spanner.savepoint_support to-'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.rpc_priority to 'NULL'; ++set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'+; +set spanner.savepoint_support to 'ENABLED'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to+'NULL'; +set spanner.savepoint_support to+'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.rpc_priority to 'NULL'; +-#set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.rpc_priority to 'NULL'-#; +set spanner.savepoint_support to 'ENABLED'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to-#'NULL'; +set spanner.savepoint_support to-#'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.rpc_priority to 'NULL'; +/set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'/; +set spanner.savepoint_support to 'ENABLED'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to/'NULL'; +set spanner.savepoint_support to/'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.rpc_priority to 'NULL'; +\set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'\; +set spanner.savepoint_support to 'ENABLED'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to\'NULL'; +set spanner.savepoint_support to\'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.rpc_priority to 'NULL'; +?set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'?; +set spanner.savepoint_support to 'ENABLED'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to?'NULL'; +set spanner.savepoint_support to?'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.rpc_priority to 'NULL'; +-/set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'-/; +set spanner.savepoint_support to 'ENABLED'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to-/'NULL'; +set spanner.savepoint_support to-/'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.rpc_priority to 'NULL'; +/#set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'/#; +set spanner.savepoint_support to 'ENABLED'/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to/#'NULL'; +set spanner.savepoint_support to/#'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.rpc_priority to 'NULL'; +/-set spanner.savepoint_support to 'ENABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to 'NULL'/-; +set spanner.savepoint_support to 'ENABLED'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.rpc_priority to/-'NULL'; +set spanner.savepoint_support to/-'ENABLED'; NEW_CONNECTION; -set spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; -SET SPANNER.SAVEPOINT_SUPPORT='ENABLED'; +SET SPANNER.SAVEPOINT_SUPPORT TO 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; -set spanner.savepoint_support='enabled'; +set spanner.savepoint_support to 'fail_after_rollback'; NEW_CONNECTION; - set spanner.savepoint_support='ENABLED'; + set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; - set spanner.savepoint_support='ENABLED'; + set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; -set spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; -set spanner.savepoint_support='ENABLED' ; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK' ; NEW_CONNECTION; -set spanner.savepoint_support='ENABLED' ; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK' ; NEW_CONNECTION; -set spanner.savepoint_support='ENABLED' +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK' ; NEW_CONNECTION; -set spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; -set spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; set -spanner.savepoint_support='ENABLED'; 
+spanner.savepoint_support +to +'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.savepoint_support='ENABLED'; +foo set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED' bar; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.savepoint_support='ENABLED'; +%set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'%; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to%'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.savepoint_support='ENABLED'; +_set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'_; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to_'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.savepoint_support='ENABLED'; +&set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'&; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to&'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.savepoint_support='ENABLED'; +$set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'$; +set 
spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to$'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.savepoint_support='ENABLED'; +@set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'@; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to@'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.savepoint_support='ENABLED'; +!set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'!; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to!'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.savepoint_support='ENABLED'; +*set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'*; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to*'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.savepoint_support='ENABLED'; +(set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'(; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support 
to('FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.savepoint_support='ENABLED'; +)set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'); +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to)'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.savepoint_support='ENABLED'; +-set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'-; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to-'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.savepoint_support='ENABLED'; ++set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'+; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to+'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.savepoint_support='ENABLED'; +-#set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'-#; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to-#'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.savepoint_support='ENABLED'; +/set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'/; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to/'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.savepoint_support='ENABLED'; +\set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'\; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to\'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.savepoint_support='ENABLED'; +?set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'?; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to?'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.savepoint_support='ENABLED'; +-/set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'-/; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to-/'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.savepoint_support='ENABLED'; +/#set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'/#; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'/#; NEW_CONNECTION; 
@EXPECT EXCEPTION INVALID_ARGUMENT -set/#spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to/#'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.savepoint_support='ENABLED'; +/-set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='ENABLED'/-; +set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-spanner.savepoint_support='ENABLED'; +set spanner.savepoint_support to/-'FAIL_AFTER_ROLLBACK'; NEW_CONNECTION; -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; -SET SPANNER.SAVEPOINT_SUPPORT='FAIL_AFTER_ROLLBACK'; +SET SPANNER.SAVEPOINT_SUPPORT TO 'DISABLED'; NEW_CONNECTION; -set spanner.savepoint_support='fail_after_rollback'; +set spanner.savepoint_support to 'disabled'; NEW_CONNECTION; - set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; + set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; - set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; + set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK' ; +set spanner.savepoint_support to 'DISABLED' ; NEW_CONNECTION; -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK' ; +set spanner.savepoint_support to 'DISABLED' ; NEW_CONNECTION; -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK' +set spanner.savepoint_support to 'DISABLED' ; NEW_CONNECTION; -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; set -spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +spanner.savepoint_support +to +'DISABLED'; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +foo set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK' bar; +set spanner.savepoint_support to 'DISABLED' bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +%set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'%; +set spanner.savepoint_support to 'DISABLED'%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to%'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +_set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'_; +set spanner.savepoint_support to 'DISABLED'_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to_'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +&set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'&; +set spanner.savepoint_support to 'DISABLED'&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to&'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +$set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'$; +set spanner.savepoint_support to 'DISABLED'$; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to$'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +@set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'@; +set spanner.savepoint_support to 'DISABLED'@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to@'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +!set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'!; +set spanner.savepoint_support to 'DISABLED'!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to!'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +*set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'*; +set spanner.savepoint_support to 'DISABLED'*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to*'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +(set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'(; +set spanner.savepoint_support to 'DISABLED'(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to('DISABLED'; NEW_CONNECTION; 
@EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +)set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'); +set spanner.savepoint_support to 'DISABLED'); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to)'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +-set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'-; +set spanner.savepoint_support to 'DISABLED'-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to-'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; ++set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'+; +set spanner.savepoint_support to 'DISABLED'+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to+'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +-#set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'-#; +set spanner.savepoint_support to 'DISABLED'-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to-#'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +/set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'/; +set spanner.savepoint_support to 'DISABLED'/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to/'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +\set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'\; +set spanner.savepoint_support to 'DISABLED'\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to\'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +?set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'?; +set spanner.savepoint_support to 'DISABLED'?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to?'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +-/set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'-/; +set spanner.savepoint_support to 'DISABLED'-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-/spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to-/'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +/#set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'/#; +set spanner.savepoint_support to 'DISABLED'/#; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set/#spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to/#'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +/-set spanner.savepoint_support to 'DISABLED'; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='FAIL_AFTER_ROLLBACK'/-; +set spanner.savepoint_support to 'DISABLED'/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-spanner.savepoint_support='FAIL_AFTER_ROLLBACK'; +set spanner.savepoint_support to/-'DISABLED'; NEW_CONNECTION; -set spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; -SET SPANNER.SAVEPOINT_SUPPORT='DISABLED'; +SET SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = TRUE; NEW_CONNECTION; -set spanner.savepoint_support='disabled'; +set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; - set spanner.savepoint_support='DISABLED'; + set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; - set spanner.savepoint_support='DISABLED'; + set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; -set spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; -set spanner.savepoint_support='DISABLED' ; +set spanner.delay_transaction_start_until_first_write = true ; NEW_CONNECTION; -set spanner.savepoint_support='DISABLED' ; +set spanner.delay_transaction_start_until_first_write = true ; NEW_CONNECTION; -set spanner.savepoint_support='DISABLED' +set spanner.delay_transaction_start_until_first_write = true ; NEW_CONNECTION; -set spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; -set spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; set 
-spanner.savepoint_support='DISABLED'; +spanner.delay_transaction_start_until_first_write += +true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.savepoint_support='DISABLED'; +foo set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED' bar; +set spanner.delay_transaction_start_until_first_write = true bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.savepoint_support='DISABLED'; +%set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'%; +set spanner.delay_transaction_start_until_first_write = true%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set%spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =%true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.savepoint_support='DISABLED'; +_set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'_; +set spanner.delay_transaction_start_until_first_write = true_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set_spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =_true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.savepoint_support='DISABLED'; +&set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'&; +set spanner.delay_transaction_start_until_first_write = true&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set&spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =&true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.savepoint_support='DISABLED'; +$set 
spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'$; +set spanner.delay_transaction_start_until_first_write = true$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set$spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =$true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.savepoint_support='DISABLED'; +@set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'@; +set spanner.delay_transaction_start_until_first_write = true@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set@spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =@true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.savepoint_support='DISABLED'; +!set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'!; +set spanner.delay_transaction_start_until_first_write = true!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set!spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =!true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.savepoint_support='DISABLED'; +*set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'*; +set spanner.delay_transaction_start_until_first_write = true*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set*spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =*true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.savepoint_support='DISABLED'; +(set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'(; +set spanner.delay_transaction_start_until_first_write = true(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set(spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =(true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.savepoint_support='DISABLED'; +)set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'); +set spanner.delay_transaction_start_until_first_write = true); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set)spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =)true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.savepoint_support='DISABLED'; +-set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'-; +set spanner.delay_transaction_start_until_first_write = true-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =-true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.savepoint_support='DISABLED'; ++set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'+; +set spanner.delay_transaction_start_until_first_write = true+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set+spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =+true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.savepoint_support='DISABLED'; +-#set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'-#; +set 
spanner.delay_transaction_start_until_first_write = true-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set-#spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =-#true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.savepoint_support='DISABLED'; +/set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'/; +set spanner.delay_transaction_start_until_first_write = true/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =/true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.savepoint_support='DISABLED'; +\set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'\; +set spanner.delay_transaction_start_until_first_write = true\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set\spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =\true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.savepoint_support='DISABLED'; +?set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'?; +set spanner.delay_transaction_start_until_first_write = true?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set?spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =?true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.savepoint_support='DISABLED'; +-/set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'-/; +set spanner.delay_transaction_start_until_first_write = true-/; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set-/spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =-/true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.savepoint_support='DISABLED'; +/#set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'/#; +set spanner.delay_transaction_start_until_first_write = true/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/#spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =/#true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.savepoint_support='DISABLED'; +/-set spanner.delay_transaction_start_until_first_write = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support='DISABLED'/-; +set spanner.delay_transaction_start_until_first_write = true/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set/-spanner.savepoint_support='DISABLED'; +set spanner.delay_transaction_start_until_first_write =/-true; NEW_CONNECTION; -set spanner.savepoint_support to 'ENABLED'; +set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; -SET SPANNER.SAVEPOINT_SUPPORT TO 'ENABLED'; +SET SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = FALSE; NEW_CONNECTION; -set spanner.savepoint_support to 'enabled'; +set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; - set spanner.savepoint_support to 'ENABLED'; + set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; - set spanner.savepoint_support to 'ENABLED'; + set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; -set spanner.savepoint_support to 'ENABLED'; +set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; -set spanner.savepoint_support to 'ENABLED' ; +set spanner.delay_transaction_start_until_first_write = false ; NEW_CONNECTION; -set 
spanner.savepoint_support to 'ENABLED' ; +set spanner.delay_transaction_start_until_first_write = false ; NEW_CONNECTION; -set spanner.savepoint_support to 'ENABLED' +set spanner.delay_transaction_start_until_first_write = false ; NEW_CONNECTION; -set spanner.savepoint_support to 'ENABLED'; +set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; -set spanner.savepoint_support to 'ENABLED'; +set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; set -spanner.savepoint_support -to -'ENABLED'; +spanner.delay_transaction_start_until_first_write += +false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.savepoint_support to 'ENABLED'; +foo set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED' bar; +set spanner.delay_transaction_start_until_first_write = false bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.savepoint_support to 'ENABLED'; +%set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'%; +set spanner.delay_transaction_start_until_first_write = false%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to%'ENABLED'; +set spanner.delay_transaction_start_until_first_write =%false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.savepoint_support to 'ENABLED'; +_set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'_; +set spanner.delay_transaction_start_until_first_write = false_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to_'ENABLED'; +set spanner.delay_transaction_start_until_first_write =_false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.savepoint_support to 
'ENABLED'; +&set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'&; +set spanner.delay_transaction_start_until_first_write = false&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to&'ENABLED'; +set spanner.delay_transaction_start_until_first_write =&false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.savepoint_support to 'ENABLED'; +$set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'$; +set spanner.delay_transaction_start_until_first_write = false$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to$'ENABLED'; +set spanner.delay_transaction_start_until_first_write =$false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.savepoint_support to 'ENABLED'; +@set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'@; +set spanner.delay_transaction_start_until_first_write = false@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to@'ENABLED'; +set spanner.delay_transaction_start_until_first_write =@false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.savepoint_support to 'ENABLED'; +!set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'!; +set spanner.delay_transaction_start_until_first_write = false!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to!'ENABLED'; +set spanner.delay_transaction_start_until_first_write =!false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.savepoint_support to 'ENABLED'; +*set 
spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'*; +set spanner.delay_transaction_start_until_first_write = false*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to*'ENABLED'; +set spanner.delay_transaction_start_until_first_write =*false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.savepoint_support to 'ENABLED'; +(set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'(; +set spanner.delay_transaction_start_until_first_write = false(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to('ENABLED'; +set spanner.delay_transaction_start_until_first_write =(false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.savepoint_support to 'ENABLED'; +)set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'); +set spanner.delay_transaction_start_until_first_write = false); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to)'ENABLED'; +set spanner.delay_transaction_start_until_first_write =)false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.savepoint_support to 'ENABLED'; +-set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'-; +set spanner.delay_transaction_start_until_first_write = false-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to-'ENABLED'; +set spanner.delay_transaction_start_until_first_write =-false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.savepoint_support to 'ENABLED'; ++set spanner.delay_transaction_start_until_first_write = false; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'+; +set spanner.delay_transaction_start_until_first_write = false+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to+'ENABLED'; +set spanner.delay_transaction_start_until_first_write =+false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.savepoint_support to 'ENABLED'; +-#set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'-#; +set spanner.delay_transaction_start_until_first_write = false-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to-#'ENABLED'; +set spanner.delay_transaction_start_until_first_write =-#false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.savepoint_support to 'ENABLED'; +/set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'/; +set spanner.delay_transaction_start_until_first_write = false/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to/'ENABLED'; +set spanner.delay_transaction_start_until_first_write =/false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.savepoint_support to 'ENABLED'; +\set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'\; +set spanner.delay_transaction_start_until_first_write = false\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to\'ENABLED'; +set spanner.delay_transaction_start_until_first_write =\false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.savepoint_support to 'ENABLED'; +?set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.savepoint_support to 'ENABLED'?; +set spanner.delay_transaction_start_until_first_write = false?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to?'ENABLED'; +set spanner.delay_transaction_start_until_first_write =?false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.savepoint_support to 'ENABLED'; +-/set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'-/; +set spanner.delay_transaction_start_until_first_write = false-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to-/'ENABLED'; +set spanner.delay_transaction_start_until_first_write =-/false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.savepoint_support to 'ENABLED'; +/#set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'/#; +set spanner.delay_transaction_start_until_first_write = false/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to/#'ENABLED'; +set spanner.delay_transaction_start_until_first_write =/#false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.savepoint_support to 'ENABLED'; +/-set spanner.delay_transaction_start_until_first_write = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'ENABLED'/-; +set spanner.delay_transaction_start_until_first_write = false/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to/-'ENABLED'; +set spanner.delay_transaction_start_until_first_write =/-false; NEW_CONNECTION; -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; -SET SPANNER.SAVEPOINT_SUPPORT TO 'FAIL_AFTER_ROLLBACK'; +SET 
SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE TO TRUE; NEW_CONNECTION; -set spanner.savepoint_support to 'fail_after_rollback'; +set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; - set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; + set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; - set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; + set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK' ; +set spanner.delay_transaction_start_until_first_write to true ; NEW_CONNECTION; -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK' ; +set spanner.delay_transaction_start_until_first_write to true ; NEW_CONNECTION; -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK' +set spanner.delay_transaction_start_until_first_write to true ; NEW_CONNECTION; -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; set -spanner.savepoint_support +spanner.delay_transaction_start_until_first_write to -'FAIL_AFTER_ROLLBACK'; +true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +foo set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK' bar; +set spanner.delay_transaction_start_until_first_write to true bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +%set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'%; +set spanner.delay_transaction_start_until_first_write to true%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to%'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to%true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +_set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'_; +set spanner.delay_transaction_start_until_first_write to true_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to_'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to_true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +&set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'&; +set spanner.delay_transaction_start_until_first_write to true&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to&'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to&true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +$set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'$; +set spanner.delay_transaction_start_until_first_write to true$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to$'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to$true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +@set 
spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'@; +set spanner.delay_transaction_start_until_first_write to true@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to@'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to@true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +!set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'!; +set spanner.delay_transaction_start_until_first_write to true!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to!'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to!true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +*set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'*; +set spanner.delay_transaction_start_until_first_write to true*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to*'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to*true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +(set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'(; +set spanner.delay_transaction_start_until_first_write to true(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to('FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to(true; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -)set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +)set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'); +set spanner.delay_transaction_start_until_first_write to true); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to)'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to)true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +-set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'-; +set spanner.delay_transaction_start_until_first_write to true-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to-'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to-true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; ++set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'+; +set spanner.delay_transaction_start_until_first_write to true+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to+'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to+true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +-#set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'-#; +set spanner.delay_transaction_start_until_first_write to true-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to-#'FAIL_AFTER_ROLLBACK'; +set 
spanner.delay_transaction_start_until_first_write to-#true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +/set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'/; +set spanner.delay_transaction_start_until_first_write to true/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to/'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to/true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +\set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'\; +set spanner.delay_transaction_start_until_first_write to true\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to\'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to\true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +?set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'?; +set spanner.delay_transaction_start_until_first_write to true?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to?'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to?true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +-/set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'-/; +set spanner.delay_transaction_start_until_first_write to true-/; NEW_CONNECTION; @EXPECT 
EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to-/'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to-/true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +/#set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'/#; +set spanner.delay_transaction_start_until_first_write to true/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to/#'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to/#true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'; +/-set spanner.delay_transaction_start_until_first_write to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'FAIL_AFTER_ROLLBACK'/-; +set spanner.delay_transaction_start_until_first_write to true/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to/-'FAIL_AFTER_ROLLBACK'; +set spanner.delay_transaction_start_until_first_write to/-true; NEW_CONNECTION; -set spanner.savepoint_support to 'DISABLED'; +set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; -SET SPANNER.SAVEPOINT_SUPPORT TO 'DISABLED'; +SET SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE TO FALSE; NEW_CONNECTION; -set spanner.savepoint_support to 'disabled'; +set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; - set spanner.savepoint_support to 'DISABLED'; + set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; - set spanner.savepoint_support to 'DISABLED'; + set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; -set spanner.savepoint_support to 'DISABLED'; +set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; -set 
spanner.savepoint_support to 'DISABLED' ; +set spanner.delay_transaction_start_until_first_write to false ; NEW_CONNECTION; -set spanner.savepoint_support to 'DISABLED' ; +set spanner.delay_transaction_start_until_first_write to false ; NEW_CONNECTION; -set spanner.savepoint_support to 'DISABLED' +set spanner.delay_transaction_start_until_first_write to false ; NEW_CONNECTION; -set spanner.savepoint_support to 'DISABLED'; +set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; -set spanner.savepoint_support to 'DISABLED'; +set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; set -spanner.savepoint_support +spanner.delay_transaction_start_until_first_write to -'DISABLED'; +false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.savepoint_support to 'DISABLED'; +foo set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED' bar; +set spanner.delay_transaction_start_until_first_write to false bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.savepoint_support to 'DISABLED'; +%set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'%; +set spanner.delay_transaction_start_until_first_write to false%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to%'DISABLED'; +set spanner.delay_transaction_start_until_first_write to%false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.savepoint_support to 'DISABLED'; +_set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'_; +set spanner.delay_transaction_start_until_first_write to false_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to_'DISABLED'; +set 
spanner.delay_transaction_start_until_first_write to_false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.savepoint_support to 'DISABLED'; +&set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'&; +set spanner.delay_transaction_start_until_first_write to false&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to&'DISABLED'; +set spanner.delay_transaction_start_until_first_write to&false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.savepoint_support to 'DISABLED'; +$set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'$; +set spanner.delay_transaction_start_until_first_write to false$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to$'DISABLED'; +set spanner.delay_transaction_start_until_first_write to$false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.savepoint_support to 'DISABLED'; +@set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'@; +set spanner.delay_transaction_start_until_first_write to false@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to@'DISABLED'; +set spanner.delay_transaction_start_until_first_write to@false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.savepoint_support to 'DISABLED'; +!set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'!; +set spanner.delay_transaction_start_until_first_write to false!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to!'DISABLED'; +set 
spanner.delay_transaction_start_until_first_write to!false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.savepoint_support to 'DISABLED'; +*set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'*; +set spanner.delay_transaction_start_until_first_write to false*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to*'DISABLED'; +set spanner.delay_transaction_start_until_first_write to*false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.savepoint_support to 'DISABLED'; +(set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'(; +set spanner.delay_transaction_start_until_first_write to false(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to('DISABLED'; +set spanner.delay_transaction_start_until_first_write to(false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.savepoint_support to 'DISABLED'; +)set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'); +set spanner.delay_transaction_start_until_first_write to false); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to)'DISABLED'; +set spanner.delay_transaction_start_until_first_write to)false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.savepoint_support to 'DISABLED'; +-set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'-; +set spanner.delay_transaction_start_until_first_write to false-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to-'DISABLED'; +set 
spanner.delay_transaction_start_until_first_write to-false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.savepoint_support to 'DISABLED'; ++set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'+; +set spanner.delay_transaction_start_until_first_write to false+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to+'DISABLED'; +set spanner.delay_transaction_start_until_first_write to+false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.savepoint_support to 'DISABLED'; +-#set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'-#; +set spanner.delay_transaction_start_until_first_write to false-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to-#'DISABLED'; +set spanner.delay_transaction_start_until_first_write to-#false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.savepoint_support to 'DISABLED'; +/set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'/; +set spanner.delay_transaction_start_until_first_write to false/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to/'DISABLED'; +set spanner.delay_transaction_start_until_first_write to/false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.savepoint_support to 'DISABLED'; +\set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'\; +set spanner.delay_transaction_start_until_first_write to false\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to\'DISABLED'; +set 
spanner.delay_transaction_start_until_first_write to\false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.savepoint_support to 'DISABLED'; +?set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'?; +set spanner.delay_transaction_start_until_first_write to false?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to?'DISABLED'; +set spanner.delay_transaction_start_until_first_write to?false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.savepoint_support to 'DISABLED'; +-/set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'-/; +set spanner.delay_transaction_start_until_first_write to false-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to-/'DISABLED'; +set spanner.delay_transaction_start_until_first_write to-/false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.savepoint_support to 'DISABLED'; +/#set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'/#; +set spanner.delay_transaction_start_until_first_write to false/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to/#'DISABLED'; +set spanner.delay_transaction_start_until_first_write to/#false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.savepoint_support to 'DISABLED'; +/-set spanner.delay_transaction_start_until_first_write to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to 'DISABLED'/-; +set spanner.delay_transaction_start_until_first_write to false/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.savepoint_support to/-'DISABLED'; +set 
spanner.delay_transaction_start_until_first_write to/-false; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = true; +set spanner.keep_transaction_alive = true; NEW_CONNECTION; -SET SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = TRUE; +SET SPANNER.KEEP_TRANSACTION_ALIVE = TRUE; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = true; +set spanner.keep_transaction_alive = true; NEW_CONNECTION; - set spanner.delay_transaction_start_until_first_write = true; + set spanner.keep_transaction_alive = true; NEW_CONNECTION; - set spanner.delay_transaction_start_until_first_write = true; + set spanner.keep_transaction_alive = true; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = true; +set spanner.keep_transaction_alive = true; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = true ; +set spanner.keep_transaction_alive = true ; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = true ; +set spanner.keep_transaction_alive = true ; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = true +set spanner.keep_transaction_alive = true ; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = true; +set spanner.keep_transaction_alive = true; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = true; +set spanner.keep_transaction_alive = true; NEW_CONNECTION; set -spanner.delay_transaction_start_until_first_write +spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.delay_transaction_start_until_first_write = true; +foo set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true bar; +set spanner.keep_transaction_alive = true bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.delay_transaction_start_until_first_write = true; +%set 
spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true%; +set spanner.keep_transaction_alive = true%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =%true; +set spanner.keep_transaction_alive =%true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.delay_transaction_start_until_first_write = true; +_set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true_; +set spanner.keep_transaction_alive = true_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =_true; +set spanner.keep_transaction_alive =_true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.delay_transaction_start_until_first_write = true; +&set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true&; +set spanner.keep_transaction_alive = true&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =&true; +set spanner.keep_transaction_alive =&true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.delay_transaction_start_until_first_write = true; +$set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true$; +set spanner.keep_transaction_alive = true$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =$true; +set spanner.keep_transaction_alive =$true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.delay_transaction_start_until_first_write = true; +@set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true@; +set spanner.keep_transaction_alive = true@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =@true; +set spanner.keep_transaction_alive =@true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.delay_transaction_start_until_first_write = true; +!set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true!; +set spanner.keep_transaction_alive = true!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =!true; +set spanner.keep_transaction_alive =!true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.delay_transaction_start_until_first_write = true; +*set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true*; +set spanner.keep_transaction_alive = true*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =*true; +set spanner.keep_transaction_alive =*true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.delay_transaction_start_until_first_write = true; +(set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true(; +set spanner.keep_transaction_alive = true(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =(true; +set spanner.keep_transaction_alive =(true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.delay_transaction_start_until_first_write = true; +)set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true); +set 
spanner.keep_transaction_alive = true); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =)true; +set spanner.keep_transaction_alive =)true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.delay_transaction_start_until_first_write = true; +-set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true-; +set spanner.keep_transaction_alive = true-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =-true; +set spanner.keep_transaction_alive =-true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.delay_transaction_start_until_first_write = true; ++set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true+; +set spanner.keep_transaction_alive = true+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =+true; +set spanner.keep_transaction_alive =+true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.delay_transaction_start_until_first_write = true; +-#set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true-#; +set spanner.keep_transaction_alive = true-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =-#true; +set spanner.keep_transaction_alive =-#true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.delay_transaction_start_until_first_write = true; +/set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true/; +set spanner.keep_transaction_alive = true/; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =/true; +set spanner.keep_transaction_alive =/true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.delay_transaction_start_until_first_write = true; +\set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true\; +set spanner.keep_transaction_alive = true\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =\true; +set spanner.keep_transaction_alive =\true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.delay_transaction_start_until_first_write = true; +?set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true?; +set spanner.keep_transaction_alive = true?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =?true; +set spanner.keep_transaction_alive =?true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.delay_transaction_start_until_first_write = true; +-/set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true-/; +set spanner.keep_transaction_alive = true-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =-/true; +set spanner.keep_transaction_alive =-/true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.delay_transaction_start_until_first_write = true; +/#set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true/#; +set spanner.keep_transaction_alive = true/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write 
=/#true; +set spanner.keep_transaction_alive =/#true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.delay_transaction_start_until_first_write = true; +/-set spanner.keep_transaction_alive = true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = true/-; +set spanner.keep_transaction_alive = true/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =/-true; +set spanner.keep_transaction_alive =/-true; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = false; +set spanner.keep_transaction_alive = false; NEW_CONNECTION; -SET SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE = FALSE; +SET SPANNER.KEEP_TRANSACTION_ALIVE = FALSE; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = false; +set spanner.keep_transaction_alive = false; NEW_CONNECTION; - set spanner.delay_transaction_start_until_first_write = false; + set spanner.keep_transaction_alive = false; NEW_CONNECTION; - set spanner.delay_transaction_start_until_first_write = false; + set spanner.keep_transaction_alive = false; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = false; +set spanner.keep_transaction_alive = false; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = false ; +set spanner.keep_transaction_alive = false ; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = false ; +set spanner.keep_transaction_alive = false ; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = false +set spanner.keep_transaction_alive = false ; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = false; +set spanner.keep_transaction_alive = false; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write = false; +set spanner.keep_transaction_alive = false; NEW_CONNECTION; set -spanner.delay_transaction_start_until_first_write 
+spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.delay_transaction_start_until_first_write = false; +foo set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false bar; +set spanner.keep_transaction_alive = false bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.delay_transaction_start_until_first_write = false; +%set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false%; +set spanner.keep_transaction_alive = false%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =%false; +set spanner.keep_transaction_alive =%false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.delay_transaction_start_until_first_write = false; +_set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false_; +set spanner.keep_transaction_alive = false_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =_false; +set spanner.keep_transaction_alive =_false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.delay_transaction_start_until_first_write = false; +&set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false&; +set spanner.keep_transaction_alive = false&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =&false; +set spanner.keep_transaction_alive =&false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.delay_transaction_start_until_first_write = false; +$set spanner.keep_transaction_alive = false; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false$; +set spanner.keep_transaction_alive = false$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =$false; +set spanner.keep_transaction_alive =$false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.delay_transaction_start_until_first_write = false; +@set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false@; +set spanner.keep_transaction_alive = false@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =@false; +set spanner.keep_transaction_alive =@false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.delay_transaction_start_until_first_write = false; +!set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false!; +set spanner.keep_transaction_alive = false!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =!false; +set spanner.keep_transaction_alive =!false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.delay_transaction_start_until_first_write = false; +*set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false*; +set spanner.keep_transaction_alive = false*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =*false; +set spanner.keep_transaction_alive =*false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.delay_transaction_start_until_first_write = false; +(set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.delay_transaction_start_until_first_write = false(; +set spanner.keep_transaction_alive = false(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =(false; +set spanner.keep_transaction_alive =(false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.delay_transaction_start_until_first_write = false; +)set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false); +set spanner.keep_transaction_alive = false); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =)false; +set spanner.keep_transaction_alive =)false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.delay_transaction_start_until_first_write = false; +-set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false-; +set spanner.keep_transaction_alive = false-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =-false; +set spanner.keep_transaction_alive =-false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.delay_transaction_start_until_first_write = false; ++set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false+; +set spanner.keep_transaction_alive = false+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =+false; +set spanner.keep_transaction_alive =+false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.delay_transaction_start_until_first_write = false; +-#set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false-#; 
+set spanner.keep_transaction_alive = false-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =-#false; +set spanner.keep_transaction_alive =-#false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.delay_transaction_start_until_first_write = false; +/set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false/; +set spanner.keep_transaction_alive = false/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =/false; +set spanner.keep_transaction_alive =/false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.delay_transaction_start_until_first_write = false; +\set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false\; +set spanner.keep_transaction_alive = false\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =\false; +set spanner.keep_transaction_alive =\false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.delay_transaction_start_until_first_write = false; +?set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false?; +set spanner.keep_transaction_alive = false?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =?false; +set spanner.keep_transaction_alive =?false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.delay_transaction_start_until_first_write = false; +-/set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false-/; +set spanner.keep_transaction_alive = false-/; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =-/false; +set spanner.keep_transaction_alive =-/false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.delay_transaction_start_until_first_write = false; +/#set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false/#; +set spanner.keep_transaction_alive = false/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =/#false; +set spanner.keep_transaction_alive =/#false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.delay_transaction_start_until_first_write = false; +/-set spanner.keep_transaction_alive = false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write = false/-; +set spanner.keep_transaction_alive = false/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write =/-false; +set spanner.keep_transaction_alive =/-false; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to true; +set spanner.keep_transaction_alive to true; NEW_CONNECTION; -SET SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE TO TRUE; +SET SPANNER.KEEP_TRANSACTION_ALIVE TO TRUE; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to true; +set spanner.keep_transaction_alive to true; NEW_CONNECTION; - set spanner.delay_transaction_start_until_first_write to true; + set spanner.keep_transaction_alive to true; NEW_CONNECTION; - set spanner.delay_transaction_start_until_first_write to true; + set spanner.keep_transaction_alive to true; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to true; +set spanner.keep_transaction_alive to true; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to true ; +set 
spanner.keep_transaction_alive to true ; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to true ; +set spanner.keep_transaction_alive to true ; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to true +set spanner.keep_transaction_alive to true ; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to true; +set spanner.keep_transaction_alive to true; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to true; +set spanner.keep_transaction_alive to true; NEW_CONNECTION; set -spanner.delay_transaction_start_until_first_write +spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.delay_transaction_start_until_first_write to true; +foo set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true bar; +set spanner.keep_transaction_alive to true bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.delay_transaction_start_until_first_write to true; +%set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true%; +set spanner.keep_transaction_alive to true%; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to%true; +set spanner.keep_transaction_alive to%true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.delay_transaction_start_until_first_write to true; +_set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true_; +set spanner.keep_transaction_alive to true_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to_true; +set spanner.keep_transaction_alive to_true; NEW_CONNECTION; @EXPECT EXCEPTION 
INVALID_ARGUMENT -&set spanner.delay_transaction_start_until_first_write to true; +&set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true&; +set spanner.keep_transaction_alive to true&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to&true; +set spanner.keep_transaction_alive to&true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.delay_transaction_start_until_first_write to true; +$set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true$; +set spanner.keep_transaction_alive to true$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to$true; +set spanner.keep_transaction_alive to$true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.delay_transaction_start_until_first_write to true; +@set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true@; +set spanner.keep_transaction_alive to true@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to@true; +set spanner.keep_transaction_alive to@true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.delay_transaction_start_until_first_write to true; +!set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true!; +set spanner.keep_transaction_alive to true!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to!true; +set spanner.keep_transaction_alive to!true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set 
spanner.delay_transaction_start_until_first_write to true; +*set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true*; +set spanner.keep_transaction_alive to true*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to*true; +set spanner.keep_transaction_alive to*true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.delay_transaction_start_until_first_write to true; +(set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true(; +set spanner.keep_transaction_alive to true(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to(true; +set spanner.keep_transaction_alive to(true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.delay_transaction_start_until_first_write to true; +)set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true); +set spanner.keep_transaction_alive to true); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to)true; +set spanner.keep_transaction_alive to)true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.delay_transaction_start_until_first_write to true; +-set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true-; +set spanner.keep_transaction_alive to true-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to-true; +set spanner.keep_transaction_alive to-true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.delay_transaction_start_until_first_write to true; 
++set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true+; +set spanner.keep_transaction_alive to true+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to+true; +set spanner.keep_transaction_alive to+true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.delay_transaction_start_until_first_write to true; +-#set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true-#; +set spanner.keep_transaction_alive to true-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to-#true; +set spanner.keep_transaction_alive to-#true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.delay_transaction_start_until_first_write to true; +/set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true/; +set spanner.keep_transaction_alive to true/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to/true; +set spanner.keep_transaction_alive to/true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.delay_transaction_start_until_first_write to true; +\set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true\; +set spanner.keep_transaction_alive to true\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to\true; +set spanner.keep_transaction_alive to\true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.delay_transaction_start_until_first_write to true; +?set spanner.keep_transaction_alive to true; 
NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true?; +set spanner.keep_transaction_alive to true?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to?true; +set spanner.keep_transaction_alive to?true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.delay_transaction_start_until_first_write to true; +-/set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true-/; +set spanner.keep_transaction_alive to true-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to-/true; +set spanner.keep_transaction_alive to-/true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.delay_transaction_start_until_first_write to true; +/#set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true/#; +set spanner.keep_transaction_alive to true/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to/#true; +set spanner.keep_transaction_alive to/#true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.delay_transaction_start_until_first_write to true; +/-set spanner.keep_transaction_alive to true; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to true/-; +set spanner.keep_transaction_alive to true/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to/-true; +set spanner.keep_transaction_alive to/-true; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to false; +set spanner.keep_transaction_alive to false; NEW_CONNECTION; -SET SPANNER.DELAY_TRANSACTION_START_UNTIL_FIRST_WRITE TO FALSE; 
+SET SPANNER.KEEP_TRANSACTION_ALIVE TO FALSE; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to false; +set spanner.keep_transaction_alive to false; NEW_CONNECTION; - set spanner.delay_transaction_start_until_first_write to false; + set spanner.keep_transaction_alive to false; NEW_CONNECTION; - set spanner.delay_transaction_start_until_first_write to false; + set spanner.keep_transaction_alive to false; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to false; +set spanner.keep_transaction_alive to false; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to false ; +set spanner.keep_transaction_alive to false ; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to false ; +set spanner.keep_transaction_alive to false ; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to false +set spanner.keep_transaction_alive to false ; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to false; +set spanner.keep_transaction_alive to false; NEW_CONNECTION; -set spanner.delay_transaction_start_until_first_write to false; +set spanner.keep_transaction_alive to false; NEW_CONNECTION; set -spanner.delay_transaction_start_until_first_write +spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -foo set spanner.delay_transaction_start_until_first_write to false; +foo set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false bar; +set spanner.keep_transaction_alive to false bar; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -%set spanner.delay_transaction_start_until_first_write to false; +%set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false%; +set spanner.keep_transaction_alive to false%; NEW_CONNECTION; 
@EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to%false; +set spanner.keep_transaction_alive to%false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -_set spanner.delay_transaction_start_until_first_write to false; +_set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false_; +set spanner.keep_transaction_alive to false_; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to_false; +set spanner.keep_transaction_alive to_false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -&set spanner.delay_transaction_start_until_first_write to false; +&set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false&; +set spanner.keep_transaction_alive to false&; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to&false; +set spanner.keep_transaction_alive to&false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -$set spanner.delay_transaction_start_until_first_write to false; +$set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false$; +set spanner.keep_transaction_alive to false$; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to$false; +set spanner.keep_transaction_alive to$false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -@set spanner.delay_transaction_start_until_first_write to false; +@set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false@; +set spanner.keep_transaction_alive to false@; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.delay_transaction_start_until_first_write to@false; +set spanner.keep_transaction_alive to@false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -!set spanner.delay_transaction_start_until_first_write to false; +!set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false!; +set spanner.keep_transaction_alive to false!; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to!false; +set spanner.keep_transaction_alive to!false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -*set spanner.delay_transaction_start_until_first_write to false; +*set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false*; +set spanner.keep_transaction_alive to false*; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to*false; +set spanner.keep_transaction_alive to*false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -(set spanner.delay_transaction_start_until_first_write to false; +(set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false(; +set spanner.keep_transaction_alive to false(; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to(false; +set spanner.keep_transaction_alive to(false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -)set spanner.delay_transaction_start_until_first_write to false; +)set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false); +set spanner.keep_transaction_alive to false); NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.delay_transaction_start_until_first_write to)false; +set spanner.keep_transaction_alive to)false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --set spanner.delay_transaction_start_until_first_write to false; +-set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false-; +set spanner.keep_transaction_alive to false-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to-false; +set spanner.keep_transaction_alive to-false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -+set spanner.delay_transaction_start_until_first_write to false; ++set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false+; +set spanner.keep_transaction_alive to false+; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to+false; +set spanner.keep_transaction_alive to+false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --#set spanner.delay_transaction_start_until_first_write to false; +-#set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false-#; +set spanner.keep_transaction_alive to false-#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to-#false; +set spanner.keep_transaction_alive to-#false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/set spanner.delay_transaction_start_until_first_write to false; +/set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false/; +set spanner.keep_transaction_alive to false/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.delay_transaction_start_until_first_write to/false; +set spanner.keep_transaction_alive to/false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -\set spanner.delay_transaction_start_until_first_write to false; +\set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false\; +set spanner.keep_transaction_alive to false\; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to\false; +set spanner.keep_transaction_alive to\false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -?set spanner.delay_transaction_start_until_first_write to false; +?set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false?; +set spanner.keep_transaction_alive to false?; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to?false; +set spanner.keep_transaction_alive to?false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT --/set spanner.delay_transaction_start_until_first_write to false; +-/set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false-/; +set spanner.keep_transaction_alive to false-/; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to-/false; +set spanner.keep_transaction_alive to-/false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/#set spanner.delay_transaction_start_until_first_write to false; +/#set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false/#; +set spanner.keep_transaction_alive to false/#; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set 
spanner.delay_transaction_start_until_first_write to/#false; +set spanner.keep_transaction_alive to/#false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -/-set spanner.delay_transaction_start_until_first_write to false; +/-set spanner.keep_transaction_alive to false; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to false/-; +set spanner.keep_transaction_alive to false/-; NEW_CONNECTION; @EXPECT EXCEPTION INVALID_ARGUMENT -set spanner.delay_transaction_start_until_first_write to/-false; +set spanner.keep_transaction_alive to/-false; NEW_CONNECTION; show spanner.data_boost_enabled; NEW_CONNECTION; diff --git a/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/ConnectionImplGeneratedSqlScriptTest.sql b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/ConnectionImplGeneratedSqlScriptTest.sql index e35ae5f3c3c..03737a89c27 100644 --- a/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/ConnectionImplGeneratedSqlScriptTest.sql +++ b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/connection/postgresql/ConnectionImplGeneratedSqlScriptTest.sql @@ -160,15 +160,15 @@ NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; COMMIT; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:32.957000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:32.957000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:28.346000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:28.346000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; COMMIT; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:32.957000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 
2024-09-09T09:13:28.346000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -510,15 +510,15 @@ NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.060000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.060000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:28.465000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:28.465000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.060000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:28.465000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -950,8 +950,8 @@ BEGIN TRANSACTION; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; ROLLBACK; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.165000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.165000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:28.586000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:28.586000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; @@ -961,7 +961,7 @@ BEGIN TRANSACTION; SELECT 1 AS TEST; ROLLBACK; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.165000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:28.586000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -1462,8 +1462,8 @@ BEGIN TRANSACTION; @EXPECT RESULT_SET 'TEST',1 SELECT 1 
AS TEST; COMMIT; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.275000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.275000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:28.695000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:28.695000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; @@ -1473,7 +1473,7 @@ BEGIN TRANSACTION; SELECT 1 AS TEST; COMMIT; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.275000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:28.695000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -1876,15 +1876,15 @@ NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; BEGIN TRANSACTION; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.351000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.351000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:28.776000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:28.776000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; BEGIN TRANSACTION; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.351000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:28.776000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -2243,14 +2243,14 @@ SET AUTOCOMMIT=FALSE; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.428000000Z'; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:28.860000000Z'; NEW_CONNECTION; SET 
SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.428000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:28.860000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -2600,13 +2600,13 @@ SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.509000000Z'; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:28.947000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.509000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:28.947000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -2910,14 +2910,14 @@ SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.575000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.575000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.020000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.020000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.575000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.020000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=FALSE; @@ -3245,15 +3245,15 @@ NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; COMMIT; -SET 
SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.662000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.662000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.108000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.108000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; COMMIT; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.662000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.108000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -3662,8 +3662,8 @@ SET AUTOCOMMIT=FALSE; START BATCH DDL; CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); RUN BATCH; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.747000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.747000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.190000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.190000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; @@ -3672,7 +3672,7 @@ START BATCH DDL; CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); RUN BATCH; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.747000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.190000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -4081,14 +4081,14 @@ SET AUTOCOMMIT=FALSE; START BATCH DDL; CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.806000000Z'; +SET 
SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.261000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; START BATCH DDL; CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.806000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.261000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -4438,13 +4438,13 @@ SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; START BATCH DDL; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.861000000Z'; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.321000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; START BATCH DDL; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.861000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.321000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -4877,8 +4877,8 @@ SET TRANSACTION READ ONLY; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; COMMIT; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.927000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.927000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.398000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.398000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; @@ -4888,7 +4888,7 @@ SET TRANSACTION READ ONLY; SELECT 1 AS TEST; COMMIT; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.927000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.398000000Z'; 
NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -5288,15 +5288,15 @@ NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; SET TRANSACTION READ ONLY; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:33.985000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:33.985000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.470000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.470000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; SET TRANSACTION READ ONLY; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:33.985000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.470000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -5641,15 +5641,15 @@ NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.041000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.041000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.534000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.534000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; SET SPANNER.READ_ONLY_STALENESS='EXACT_STALENESS 10s'; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.041000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.534000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -6088,8 +6088,8 @@ BEGIN TRANSACTION; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; ROLLBACK; 
-SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.108000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.108000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.614000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.614000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; @@ -6099,7 +6099,7 @@ BEGIN TRANSACTION; SELECT 1 AS TEST; ROLLBACK; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.108000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.614000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -6607,8 +6607,8 @@ BEGIN TRANSACTION; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; COMMIT; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.191000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.191000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.706000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.706000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; @@ -6618,7 +6618,7 @@ BEGIN TRANSACTION; SELECT 1 AS TEST; COMMIT; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.191000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.706000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -7023,15 +7023,15 @@ NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; BEGIN TRANSACTION; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.254000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.254000000Z' +SET 
SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.766000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.766000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; BEGIN TRANSACTION; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.254000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.766000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -7394,14 +7394,14 @@ SET AUTOCOMMIT=FALSE; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.318000000Z'; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.827000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.318000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.827000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -7756,13 +7756,13 @@ SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.384000000Z'; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.894000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; SELECT 1 AS TEST; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.384000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.894000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -8075,14 +8075,14 @@ SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; 
SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.443000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.443000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:29.947000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:29.947000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.443000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:29.947000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=FALSE; @@ -8392,13 +8392,13 @@ SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; START BATCH DDL; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.492000000Z'; +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.003000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; START BATCH DDL; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.492000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.003000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; @@ -8753,8 +8753,8 @@ SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; SET TRANSACTION READ ONLY; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.542000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.542000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.053000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.053000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET 
SPANNER.READONLY=FALSE; @@ -8762,7 +8762,7 @@ SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; SET TRANSACTION READ ONLY; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.542000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.053000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; @@ -9197,8 +9197,8 @@ SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; UPDATE foo SET bar=1; COMMIT; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.611000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.611000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.121000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.121000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; @@ -9206,8 +9206,8 @@ SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; UPDATE foo SET bar=1; COMMIT; -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.611000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:34.611000000Z' +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.121000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.121000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; @@ -9593,15 +9593,15 @@ NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.665000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.665000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.174000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.174000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; 
NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.665000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.174000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; @@ -9952,15 +9952,15 @@ NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.719000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.719000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.229000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.229000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; CREATE TABLE foo (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id); -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.719000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:34.719000000Z' +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.229000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.229000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; @@ -10320,15 +10320,15 @@ NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; UPDATE foo SET bar=1; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.800000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.800000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.285000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.285000000Z' 
SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; UPDATE foo SET bar=1; -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.800000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:34.800000000Z' +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.285000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.285000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; @@ -10718,16 +10718,16 @@ SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.857000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.857000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.343000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.343000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; @EXPECT RESULT_SET 'TEST',1 SELECT 1 AS TEST; -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.857000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:34.857000000Z' +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.343000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.343000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; @@ -11110,15 +11110,15 @@ NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; SELECT 1 AS TEST; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.918000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.918000000Z' +SET 
SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.400000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.400000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; SELECT 1 AS TEST; -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.918000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:34.918000000Z' +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.400000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.400000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; @@ -11448,14 +11448,14 @@ SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:34.974000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:34.974000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.449000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.449000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; SET AUTOCOMMIT=TRUE; -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:34.974000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:34.974000000Z' +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.449000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.449000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=FALSE; @@ -11778,15 +11778,15 @@ NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=TRUE; SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; -SET 
SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:35.024000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:35.024000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.501000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.501000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=TRUE; SET SPANNER.READ_ONLY_STALENESS='MAX_STALENESS 10s'; -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:35.024000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:35.024000000Z' +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.501000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.501000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; @@ -12193,8 +12193,8 @@ SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; SELECT 1 AS TEST; COMMIT; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:35.080000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:35.080000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.557000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.557000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; @@ -12202,8 +12202,8 @@ SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; SELECT 1 AS TEST; COMMIT; -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:35.080000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:35.080000000Z' +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.557000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.557000000Z' SHOW VARIABLE 
SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; @@ -12586,15 +12586,15 @@ NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:35.129000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:35.129000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.608000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.608000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=TRUE; BEGIN TRANSACTION; @EXPECT EXCEPTION FAILED_PRECONDITION -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:35.129000000Z'; +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.608000000Z'; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=TRUE; @@ -12932,15 +12932,15 @@ NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=TRUE; SELECT 1 AS TEST; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:35.184000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:35.184000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.658000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.658000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=TRUE; SELECT 1 AS TEST; -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:35.184000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:35.184000000Z' +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.658000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.658000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; @@ 
-13287,15 +13287,15 @@ NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=TRUE; SELECT 1 AS TEST; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:35.242000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:35.242000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.708000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.708000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=TRUE; SELECT 1 AS TEST; -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:35.242000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:35.242000000Z' +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.708000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-09-09T09:13:30.708000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; @@ -13612,14 +13612,14 @@ SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=TRUE; -SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-04-22T15:43:35.293000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-04-22T15:43:35.293000000Z' +SET SPANNER.READ_ONLY_STALENESS='READ_TIMESTAMP 2024-09-09T09:13:30.756000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','READ_TIMESTAMP 2024-09-09T09:13:30.756000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; SET AUTOCOMMIT=TRUE; -SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-04-22T15:43:35.293000000Z'; -@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 2024-04-22T15:43:35.293000000Z' +SET SPANNER.READ_ONLY_STALENESS='MIN_READ_TIMESTAMP 2024-09-09T09:13:30.756000000Z'; +@EXPECT RESULT_SET 'SPANNER.READ_ONLY_STALENESS','MIN_READ_TIMESTAMP 
2024-09-09T09:13:30.756000000Z' SHOW VARIABLE SPANNER.READ_ONLY_STALENESS; NEW_CONNECTION; SET SPANNER.READONLY=TRUE; diff --git a/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/descriptors.pb b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/descriptors.pb index 3ebb79420b3..115e5fccbb5 100644 Binary files a/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/descriptors.pb and b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/descriptors.pb differ diff --git a/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/singer.proto b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/singer.proto index b62b48ed67f..570a3f6f705 100644 --- a/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/singer.proto +++ b/google-cloud-spanner/src/test/resources/com/google/cloud/spanner/singer.proto @@ -1,6 +1,6 @@ -syntax = "proto2"; +syntax = "proto3"; -package spanner.examples.music; +package examples.spanner.music; option java_package = "com.google.cloud.spanner"; option java_outer_classname = "SingerProto"; diff --git a/grpc-google-cloud-spanner-admin-database-v1/clirr-ignored-differences.xml b/grpc-google-cloud-spanner-admin-database-v1/clirr-ignored-differences.xml new file mode 100644 index 00000000000..80e6f1d59cb --- /dev/null +++ b/grpc-google-cloud-spanner-admin-database-v1/clirr-ignored-differences.xml @@ -0,0 +1,9 @@ + + + + + 7012 + com/google/spanner/admin/database/v1/* + * + + diff --git a/grpc-google-cloud-spanner-admin-database-v1/pom.xml b/grpc-google-cloud-spanner-admin-database-v1/pom.xml index b2da22f2879..0eb83a296cd 100644 --- a/grpc-google-cloud-spanner-admin-database-v1/pom.xml +++ b/grpc-google-cloud-spanner-admin-database-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-spanner-admin-database-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT grpc-google-cloud-spanner-admin-database-v1 GRPC library for 
grpc-google-cloud-spanner-admin-database-v1 com.google.cloud google-cloud-spanner-parent - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT diff --git a/grpc-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseAdminGrpc.java b/grpc-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseAdminGrpc.java index d2f6339bae4..01592c14be9 100644 --- a/grpc-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseAdminGrpc.java +++ b/grpc-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseAdminGrpc.java @@ -25,7 +25,7 @@ * The Cloud Spanner Database Admin API can be used to: * * create, drop, and list databases * * update the schema of pre-existing databases - * * create, delete and list backups for a database + * * create, delete, copy and list backups for a database * * restore a database from an existing backup * */ @@ -943,6 +943,248 @@ private DatabaseAdminGrpc() {} return getListDatabaseRolesMethod; } + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getCreateBackupScheduleMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateBackupSchedule", + requestType = com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.class, + responseType = com.google.spanner.admin.database.v1.BackupSchedule.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getCreateBackupScheduleMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getCreateBackupScheduleMethod; + if 
((getCreateBackupScheduleMethod = DatabaseAdminGrpc.getCreateBackupScheduleMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getCreateBackupScheduleMethod = DatabaseAdminGrpc.getCreateBackupScheduleMethod) + == null) { + DatabaseAdminGrpc.getCreateBackupScheduleMethod = + getCreateBackupScheduleMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "CreateBackupSchedule")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.BackupSchedule + .getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("CreateBackupSchedule")) + .build(); + } + } + } + return getCreateBackupScheduleMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.GetBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getGetBackupScheduleMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetBackupSchedule", + requestType = com.google.spanner.admin.database.v1.GetBackupScheduleRequest.class, + responseType = com.google.spanner.admin.database.v1.BackupSchedule.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.GetBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getGetBackupScheduleMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.GetBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getGetBackupScheduleMethod; + if ((getGetBackupScheduleMethod = DatabaseAdminGrpc.getGetBackupScheduleMethod) == 
null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getGetBackupScheduleMethod = DatabaseAdminGrpc.getGetBackupScheduleMethod) == null) { + DatabaseAdminGrpc.getGetBackupScheduleMethod = + getGetBackupScheduleMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetBackupSchedule")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.BackupSchedule + .getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("GetBackupSchedule")) + .build(); + } + } + } + return getGetBackupScheduleMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getUpdateBackupScheduleMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "UpdateBackupSchedule", + requestType = com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.class, + responseType = com.google.spanner.admin.database.v1.BackupSchedule.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getUpdateBackupScheduleMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule> + getUpdateBackupScheduleMethod; + if ((getUpdateBackupScheduleMethod = DatabaseAdminGrpc.getUpdateBackupScheduleMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getUpdateBackupScheduleMethod 
= DatabaseAdminGrpc.getUpdateBackupScheduleMethod) + == null) { + DatabaseAdminGrpc.getUpdateBackupScheduleMethod = + getUpdateBackupScheduleMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "UpdateBackupSchedule")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.BackupSchedule + .getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("UpdateBackupSchedule")) + .build(); + } + } + } + return getUpdateBackupScheduleMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest, + com.google.protobuf.Empty> + getDeleteBackupScheduleMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "DeleteBackupSchedule", + requestType = com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.class, + responseType = com.google.protobuf.Empty.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest, + com.google.protobuf.Empty> + getDeleteBackupScheduleMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest, + com.google.protobuf.Empty> + getDeleteBackupScheduleMethod; + if ((getDeleteBackupScheduleMethod = DatabaseAdminGrpc.getDeleteBackupScheduleMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getDeleteBackupScheduleMethod = DatabaseAdminGrpc.getDeleteBackupScheduleMethod) + == null) { + DatabaseAdminGrpc.getDeleteBackupScheduleMethod = + getDeleteBackupScheduleMethod = + 
io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "DeleteBackupSchedule")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.protobuf.Empty.getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("DeleteBackupSchedule")) + .build(); + } + } + } + return getDeleteBackupScheduleMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest, + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse> + getListBackupSchedulesMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ListBackupSchedules", + requestType = com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.class, + responseType = com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest, + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse> + getListBackupSchedulesMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest, + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse> + getListBackupSchedulesMethod; + if ((getListBackupSchedulesMethod = DatabaseAdminGrpc.getListBackupSchedulesMethod) == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getListBackupSchedulesMethod = DatabaseAdminGrpc.getListBackupSchedulesMethod) + == null) { + DatabaseAdminGrpc.getListBackupSchedulesMethod = + getListBackupSchedulesMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "ListBackupSchedules")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("ListBackupSchedules")) + .build(); + } + } + } + return getListBackupSchedulesMethod; + } + /** Creates a new async stub that supports all call types for the service */ public static DatabaseAdminStub newStub(io.grpc.Channel channel) { io.grpc.stub.AbstractStub.StubFactory factory = @@ -992,7 +1234,7 @@ public DatabaseAdminFutureStub newStub( * The Cloud Spanner Database Admin API can be used to: * * create, drop, and list databases * * update the schema of pre-existing databases - * * create, delete and list backups for a database + * * create, delete, copy and list backups for a database * * restore a database from an existing backup * */ @@ -1022,8 +1264,8 @@ default void listDatabases( * have a name of the format `<database_name>/operations/<operation_id>` and * can be used to track preparation of the database. The * [metadata][google.longrunning.Operation.metadata] field type is - * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The - * [response][google.longrunning.Operation.response] field type is + * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. + * The [response][google.longrunning.Operation.response] field type is * [Database][google.spanner.admin.database.v1.Database], if successful. 
* */ @@ -1103,7 +1345,8 @@ default void updateDatabase( * the format `<database_name>/operations/<operation_id>` and can be used to * track execution of the schema change(s). The * [metadata][google.longrunning.Operation.metadata] field type is - * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. + * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. + * The operation has no response. * */ default void updateDatabaseDdl( @@ -1220,12 +1463,12 @@ default void testIamPermissions( * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` * and can be used to track creation of the backup. The * [metadata][google.longrunning.Operation.metadata] field type is - * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The - * [response][google.longrunning.Operation.response] field type is - * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the - * creation and delete the backup. - * There can be only one pending backup creation per database. Backup creation - * of different databases can run concurrently. + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * The [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. + * Cancelling the returned operation will stop the creation and delete the + * backup. There can be only one pending backup creation per database. Backup + * creation of different databases can run concurrently. * */ default void createBackup( @@ -1248,9 +1491,10 @@ default void createBackup( * The [metadata][google.longrunning.Operation.metadata] field type is * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. 
* The [response][google.longrunning.Operation.response] field type is - * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the - * copying and delete the backup. - * Concurrent CopyBackup requests can run on the same source backup. + * [Backup][google.spanner.admin.database.v1.Backup], if successful. + * Cancelling the returned operation will stop the copying and delete the + * destination backup. Concurrent CopyBackup requests can run on the same + * source backup. * */ default void copyBackup( @@ -1263,7 +1507,8 @@ default void copyBackup( * * *
    -     * Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup].
    +     * Gets metadata on a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
          * 
    */ default void getBackup( @@ -1276,7 +1521,8 @@ default void getBackup( * * *
    -     * Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup].
    +     * Updates a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
          * 
    */ default void updateBackup( @@ -1290,7 +1536,8 @@ default void updateBackup( * * *
    -     * Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup].
    +     * Deletes a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
          * 
    */ default void deleteBackup( @@ -1409,6 +1656,81 @@ default void listDatabaseRoles( io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( getListDatabaseRolesMethod(), responseObserver); } + + /** + * + * + *
    +     * Creates a new backup schedule.
    +     * 
    + */ + default void createBackupSchedule( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getCreateBackupScheduleMethod(), responseObserver); + } + + /** + * + * + *
    +     * Gets backup schedule for the input schedule name.
    +     * 
    + */ + default void getBackupSchedule( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getGetBackupScheduleMethod(), responseObserver); + } + + /** + * + * + *
    +     * Updates a backup schedule.
    +     * 
    + */ + default void updateBackupSchedule( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getUpdateBackupScheduleMethod(), responseObserver); + } + + /** + * + * + *
    +     * Deletes a backup schedule.
    +     * 
    + */ + default void deleteBackupSchedule( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getDeleteBackupScheduleMethod(), responseObserver); + } + + /** + * + * + *
    +     * Lists all the backup schedules for the database.
    +     * 
    + */ + default void listBackupSchedules( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getListBackupSchedulesMethod(), responseObserver); + } } /** @@ -1419,7 +1741,7 @@ default void listDatabaseRoles( * The Cloud Spanner Database Admin API can be used to: * * create, drop, and list databases * * update the schema of pre-existing databases - * * create, delete and list backups for a database + * * create, delete, copy and list backups for a database * * restore a database from an existing backup * */ @@ -1440,7 +1762,7 @@ public final io.grpc.ServerServiceDefinition bindService() { * The Cloud Spanner Database Admin API can be used to: * * create, drop, and list databases * * update the schema of pre-existing databases - * * create, delete and list backups for a database + * * create, delete, copy and list backups for a database * * restore a database from an existing backup * */ @@ -1481,8 +1803,8 @@ public void listDatabases( * have a name of the format `<database_name>/operations/<operation_id>` and * can be used to track preparation of the database. The * [metadata][google.longrunning.Operation.metadata] field type is - * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The - * [response][google.longrunning.Operation.response] field type is + * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. + * The [response][google.longrunning.Operation.response] field type is * [Database][google.spanner.admin.database.v1.Database], if successful. * */ @@ -1568,7 +1890,8 @@ public void updateDatabase( * the format `<database_name>/operations/<operation_id>` and can be used to * track execution of the schema change(s). 
The * [metadata][google.longrunning.Operation.metadata] field type is - * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. + * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. + * The operation has no response. * */ public void updateDatabaseDdl( @@ -1697,12 +2020,12 @@ public void testIamPermissions( * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` * and can be used to track creation of the backup. The * [metadata][google.longrunning.Operation.metadata] field type is - * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The - * [response][google.longrunning.Operation.response] field type is - * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the - * creation and delete the backup. - * There can be only one pending backup creation per database. Backup creation - * of different databases can run concurrently. + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * The [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. + * Cancelling the returned operation will stop the creation and delete the + * backup. There can be only one pending backup creation per database. Backup + * creation of different databases can run concurrently. * */ public void createBackup( @@ -1727,9 +2050,10 @@ public void createBackup( * The [metadata][google.longrunning.Operation.metadata] field type is * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. * The [response][google.longrunning.Operation.response] field type is - * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the - * copying and delete the backup. 
- * Concurrent CopyBackup requests can run on the same source backup. + * [Backup][google.spanner.admin.database.v1.Backup], if successful. + * Cancelling the returned operation will stop the copying and delete the + * destination backup. Concurrent CopyBackup requests can run on the same + * source backup. * */ public void copyBackup( @@ -1743,7 +2067,8 @@ public void copyBackup( * * *
    -     * Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup].
    +     * Gets metadata on a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
          * 
    */ public void getBackup( @@ -1757,7 +2082,8 @@ public void getBackup( * * *
    -     * Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup].
    +     * Updates a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
          * 
    */ public void updateBackup( @@ -1773,7 +2099,8 @@ public void updateBackup( * * *
    -     * Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup].
    +     * Deletes a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
          * 
    */ public void deleteBackup( @@ -1904,6 +2231,91 @@ public void listDatabaseRoles( request, responseObserver); } + + /** + * + * + *
    +     * Creates a new backup schedule.
    +     * 
    + */ + public void createBackupSchedule( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getCreateBackupScheduleMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Gets backup schedule for the input schedule name.
    +     * 
    + */ + public void getBackupSchedule( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getGetBackupScheduleMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Updates a backup schedule.
    +     * 
    + */ + public void updateBackupSchedule( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getUpdateBackupScheduleMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Deletes a backup schedule.
    +     * 
    + */ + public void deleteBackupSchedule( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getDeleteBackupScheduleMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
    +     * Lists all the backup schedules for the database.
    +     * 
    + */ + public void listBackupSchedules( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getListBackupSchedulesMethod(), getCallOptions()), + request, + responseObserver); + } } /** @@ -1914,7 +2326,7 @@ public void listDatabaseRoles( * The Cloud Spanner Database Admin API can be used to: * * create, drop, and list databases * * update the schema of pre-existing databases - * * create, delete and list backups for a database + * * create, delete, copy and list backups for a database * * restore a database from an existing backup * */ @@ -1952,8 +2364,8 @@ public com.google.spanner.admin.database.v1.ListDatabasesResponse listDatabases( * have a name of the format `<database_name>/operations/<operation_id>` and * can be used to track preparation of the database. The * [metadata][google.longrunning.Operation.metadata] field type is - * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The - * [response][google.longrunning.Operation.response] field type is + * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. + * The [response][google.longrunning.Operation.response] field type is * [Database][google.spanner.admin.database.v1.Database], if successful. * */ @@ -2029,7 +2441,8 @@ public com.google.longrunning.Operation updateDatabase( * the format `<database_name>/operations/<operation_id>` and can be used to * track execution of the schema change(s). The * [metadata][google.longrunning.Operation.metadata] field type is - * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. + * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. + * The operation has no response. 
* */ public com.google.longrunning.Operation updateDatabaseDdl( @@ -2136,12 +2549,12 @@ public com.google.iam.v1.TestIamPermissionsResponse testIamPermissions( * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` * and can be used to track creation of the backup. The * [metadata][google.longrunning.Operation.metadata] field type is - * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The - * [response][google.longrunning.Operation.response] field type is - * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the - * creation and delete the backup. - * There can be only one pending backup creation per database. Backup creation - * of different databases can run concurrently. + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * The [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. + * Cancelling the returned operation will stop the creation and delete the + * backup. There can be only one pending backup creation per database. Backup + * creation of different databases can run concurrently. * */ public com.google.longrunning.Operation createBackup( @@ -2163,9 +2576,10 @@ public com.google.longrunning.Operation createBackup( * The [metadata][google.longrunning.Operation.metadata] field type is * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. * The [response][google.longrunning.Operation.response] field type is - * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the - * copying and delete the backup. - * Concurrent CopyBackup requests can run on the same source backup. + * [Backup][google.spanner.admin.database.v1.Backup], if successful. + * Cancelling the returned operation will stop the copying and delete the + * destination backup. 
Concurrent CopyBackup requests can run on the same + * source backup. * */ public com.google.longrunning.Operation copyBackup( @@ -2178,7 +2592,8 @@ public com.google.longrunning.Operation copyBackup( * * *
    -     * Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup].
    +     * Gets metadata on a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
          * 
    */ public com.google.spanner.admin.database.v1.Backup getBackup( @@ -2191,7 +2606,8 @@ public com.google.spanner.admin.database.v1.Backup getBackup( * * *
    -     * Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup].
    +     * Updates a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
          * 
    */ public com.google.spanner.admin.database.v1.Backup updateBackup( @@ -2204,7 +2620,8 @@ public com.google.spanner.admin.database.v1.Backup updateBackup( * * *
    -     * Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup].
    +     * Deletes a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
          * 
    */ public com.google.protobuf.Empty deleteBackup( @@ -2312,6 +2729,71 @@ public com.google.spanner.admin.database.v1.ListDatabaseRolesResponse listDataba return io.grpc.stub.ClientCalls.blockingUnaryCall( getChannel(), getListDatabaseRolesMethod(), getCallOptions(), request); } + + /** + * + * + *
    +     * Creates a new backup schedule.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.BackupSchedule createBackupSchedule( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getCreateBackupScheduleMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Gets backup schedule for the input schedule name.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedule( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getGetBackupScheduleMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Updates a backup schedule.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.BackupSchedule updateBackupSchedule( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getUpdateBackupScheduleMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Deletes a backup schedule.
    +     * 
    + */ + public com.google.protobuf.Empty deleteBackupSchedule( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getDeleteBackupScheduleMethod(), getCallOptions(), request); + } + + /** + * + * + *
    +     * Lists all the backup schedules for the database.
    +     * 
    + */ + public com.google.spanner.admin.database.v1.ListBackupSchedulesResponse listBackupSchedules( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getListBackupSchedulesMethod(), getCallOptions(), request); + } } /** @@ -2322,7 +2804,7 @@ public com.google.spanner.admin.database.v1.ListDatabaseRolesResponse listDataba * The Cloud Spanner Database Admin API can be used to: * * create, drop, and list databases * * update the schema of pre-existing databases - * * create, delete and list backups for a database + * * create, delete, copy and list backups for a database * * restore a database from an existing backup * */ @@ -2361,8 +2843,8 @@ protected DatabaseAdminFutureStub build( * have a name of the format `<database_name>/operations/<operation_id>` and * can be used to track preparation of the database. The * [metadata][google.longrunning.Operation.metadata] field type is - * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The - * [response][google.longrunning.Operation.response] field type is + * [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. + * The [response][google.longrunning.Operation.response] field type is * [Database][google.spanner.admin.database.v1.Database], if successful. * */ @@ -2439,7 +2921,8 @@ protected DatabaseAdminFutureStub build( * the format `<database_name>/operations/<operation_id>` and can be used to * track execution of the schema change(s). The * [metadata][google.longrunning.Operation.metadata] field type is - * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. + * [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. + * The operation has no response. 
* */ public com.google.common.util.concurrent.ListenableFuture @@ -2550,12 +3033,12 @@ protected DatabaseAdminFutureStub build( * `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>` * and can be used to track creation of the backup. The * [metadata][google.longrunning.Operation.metadata] field type is - * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The - * [response][google.longrunning.Operation.response] field type is - * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the - * creation and delete the backup. - * There can be only one pending backup creation per database. Backup creation - * of different databases can run concurrently. + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * The [response][google.longrunning.Operation.response] field type is + * [Backup][google.spanner.admin.database.v1.Backup], if successful. + * Cancelling the returned operation will stop the creation and delete the + * backup. There can be only one pending backup creation per database. Backup + * creation of different databases can run concurrently. * */ public com.google.common.util.concurrent.ListenableFuture @@ -2577,9 +3060,10 @@ protected DatabaseAdminFutureStub build( * The [metadata][google.longrunning.Operation.metadata] field type is * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. * The [response][google.longrunning.Operation.response] field type is - * [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the - * copying and delete the backup. - * Concurrent CopyBackup requests can run on the same source backup. + * [Backup][google.spanner.admin.database.v1.Backup], if successful. + * Cancelling the returned operation will stop the copying and delete the + * destination backup. 
Concurrent CopyBackup requests can run on the same + * source backup. * */ public com.google.common.util.concurrent.ListenableFuture @@ -2592,7 +3076,8 @@ protected DatabaseAdminFutureStub build( * * *
    -     * Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup].
    +     * Gets metadata on a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
          * 
    */ public com.google.common.util.concurrent.ListenableFuture< @@ -2606,7 +3091,8 @@ protected DatabaseAdminFutureStub build( * * *
    -     * Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup].
    +     * Updates a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
          * 
    */ public com.google.common.util.concurrent.ListenableFuture< @@ -2620,7 +3106,8 @@ protected DatabaseAdminFutureStub build( * * *
    -     * Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup].
    +     * Deletes a pending or completed
    +     * [Backup][google.spanner.admin.database.v1.Backup].
          * 
    */ public com.google.common.util.concurrent.ListenableFuture @@ -2733,6 +3220,79 @@ protected DatabaseAdminFutureStub build( return io.grpc.stub.ClientCalls.futureUnaryCall( getChannel().newCall(getListDatabaseRolesMethod(), getCallOptions()), request); } + + /** + * + * + *
    +     * Creates a new backup schedule.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.BackupSchedule> + createBackupSchedule( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getCreateBackupScheduleMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Gets backup schedule for the input schedule name.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.BackupSchedule> + getBackupSchedule(com.google.spanner.admin.database.v1.GetBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getGetBackupScheduleMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Updates a backup schedule.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.BackupSchedule> + updateBackupSchedule( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getUpdateBackupScheduleMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Deletes a backup schedule.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + deleteBackupSchedule( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getDeleteBackupScheduleMethod(), getCallOptions()), request); + } + + /** + * + * + *
    +     * Lists all the backup schedules for the database.
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse> + listBackupSchedules( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getListBackupSchedulesMethod(), getCallOptions()), request); + } } private static final int METHODID_LIST_DATABASES = 0; @@ -2755,6 +3315,11 @@ protected DatabaseAdminFutureStub build( private static final int METHODID_LIST_DATABASE_OPERATIONS = 17; private static final int METHODID_LIST_BACKUP_OPERATIONS = 18; private static final int METHODID_LIST_DATABASE_ROLES = 19; + private static final int METHODID_CREATE_BACKUP_SCHEDULE = 20; + private static final int METHODID_GET_BACKUP_SCHEDULE = 21; + private static final int METHODID_UPDATE_BACKUP_SCHEDULE = 22; + private static final int METHODID_DELETE_BACKUP_SCHEDULE = 23; + private static final int METHODID_LIST_BACKUP_SCHEDULES = 24; private static final class MethodHandlers implements io.grpc.stub.ServerCalls.UnaryMethod, @@ -2889,6 +3454,36 @@ public void invoke(Req request, io.grpc.stub.StreamObserver responseObserv com.google.spanner.admin.database.v1.ListDatabaseRolesResponse>) responseObserver); break; + case METHODID_CREATE_BACKUP_SCHEDULE: + serviceImpl.createBackupSchedule( + (com.google.spanner.admin.database.v1.CreateBackupScheduleRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_GET_BACKUP_SCHEDULE: + serviceImpl.getBackupSchedule( + (com.google.spanner.admin.database.v1.GetBackupScheduleRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_UPDATE_BACKUP_SCHEDULE: + serviceImpl.updateBackupSchedule( + (com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_DELETE_BACKUP_SCHEDULE: + 
serviceImpl.deleteBackupSchedule( + (com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_LIST_BACKUP_SCHEDULES: + serviceImpl.listBackupSchedules( + (com.google.spanner.admin.database.v1.ListBackupSchedulesRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse>) + responseObserver); + break; default: throw new AssertionError(); } @@ -3032,6 +3627,40 @@ public static final io.grpc.ServerServiceDefinition bindService(AsyncService ser com.google.spanner.admin.database.v1.ListDatabaseRolesRequest, com.google.spanner.admin.database.v1.ListDatabaseRolesResponse>( service, METHODID_LIST_DATABASE_ROLES))) + .addMethod( + getCreateBackupScheduleMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule>( + service, METHODID_CREATE_BACKUP_SCHEDULE))) + .addMethod( + getGetBackupScheduleMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.GetBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule>( + service, METHODID_GET_BACKUP_SCHEDULE))) + .addMethod( + getUpdateBackupScheduleMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest, + com.google.spanner.admin.database.v1.BackupSchedule>( + service, METHODID_UPDATE_BACKUP_SCHEDULE))) + .addMethod( + getDeleteBackupScheduleMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest, + com.google.protobuf.Empty>(service, METHODID_DELETE_BACKUP_SCHEDULE))) + .addMethod( + getListBackupSchedulesMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + 
com.google.spanner.admin.database.v1.ListBackupSchedulesRequest, + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse>( + service, METHODID_LIST_BACKUP_SCHEDULES))) .build(); } @@ -3103,6 +3732,11 @@ public static io.grpc.ServiceDescriptor getServiceDescriptor() { .addMethod(getListDatabaseOperationsMethod()) .addMethod(getListBackupOperationsMethod()) .addMethod(getListDatabaseRolesMethod()) + .addMethod(getCreateBackupScheduleMethod()) + .addMethod(getGetBackupScheduleMethod()) + .addMethod(getUpdateBackupScheduleMethod()) + .addMethod(getDeleteBackupScheduleMethod()) + .addMethod(getListBackupSchedulesMethod()) .build(); } } diff --git a/grpc-google-cloud-spanner-admin-instance-v1/clirr-ignored-differences.xml b/grpc-google-cloud-spanner-admin-instance-v1/clirr-ignored-differences.xml index e4d9658950a..c611281a06e 100644 --- a/grpc-google-cloud-spanner-admin-instance-v1/clirr-ignored-differences.xml +++ b/grpc-google-cloud-spanner-admin-instance-v1/clirr-ignored-differences.xml @@ -3,7 +3,7 @@ 7012 - com/google/spanner/admin/instance/v1/InstanceAdminGrpc$AsyncService - *InstancePartition*(*) + com/google/spanner/admin/instance/v1/* + * diff --git a/grpc-google-cloud-spanner-admin-instance-v1/pom.xml b/grpc-google-cloud-spanner-admin-instance-v1/pom.xml index ef6f48a3dff..35868459c3b 100644 --- a/grpc-google-cloud-spanner-admin-instance-v1/pom.xml +++ b/grpc-google-cloud-spanner-admin-instance-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-spanner-admin-instance-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT grpc-google-cloud-spanner-admin-instance-v1 GRPC library for grpc-google-cloud-spanner-admin-instance-v1 com.google.cloud google-cloud-spanner-parent - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT diff --git a/grpc-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceAdminGrpc.java 
b/grpc-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceAdminGrpc.java index 260460319f2..81d08cb9dbe 100644 --- a/grpc-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceAdminGrpc.java +++ b/grpc-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceAdminGrpc.java @@ -996,6 +996,52 @@ private InstanceAdminGrpc() {} return getListInstancePartitionOperationsMethod; } + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.MoveInstanceRequest, + com.google.longrunning.Operation> + getMoveInstanceMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "MoveInstance", + requestType = com.google.spanner.admin.instance.v1.MoveInstanceRequest.class, + responseType = com.google.longrunning.Operation.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.MoveInstanceRequest, + com.google.longrunning.Operation> + getMoveInstanceMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.instance.v1.MoveInstanceRequest, + com.google.longrunning.Operation> + getMoveInstanceMethod; + if ((getMoveInstanceMethod = InstanceAdminGrpc.getMoveInstanceMethod) == null) { + synchronized (InstanceAdminGrpc.class) { + if ((getMoveInstanceMethod = InstanceAdminGrpc.getMoveInstanceMethod) == null) { + InstanceAdminGrpc.getMoveInstanceMethod = + getMoveInstanceMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "MoveInstance")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.instance.v1.MoveInstanceRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.longrunning.Operation.getDefaultInstance())) + .setSchemaDescriptor( + new InstanceAdminMethodDescriptorSupplier("MoveInstance")) + .build(); + } + } + } + return getMoveInstanceMethod; + } + /** Creates a new async stub that supports all call types for the service */ public static InstanceAdminStub newStub(io.grpc.Channel channel) { io.grpc.stub.AbstractStub.StubFactory factory = @@ -1098,31 +1144,31 @@ default void getInstanceConfig( * * *
    -     * Creates an instance config and begins preparing it to be used. The
    +     * Creates an instance configuration and begins preparing it to be used. The
          * returned [long-running operation][google.longrunning.Operation]
          * can be used to track the progress of preparing the new
    -     * instance config. The instance config name is assigned by the caller. If the
    -     * named instance config already exists, `CreateInstanceConfig` returns
    -     * `ALREADY_EXISTS`.
    +     * instance configuration. The instance configuration name is assigned by the
    +     * caller. If the named instance configuration already exists,
    +     * `CreateInstanceConfig` returns `ALREADY_EXISTS`.
          * Immediately after the request returns:
    -     *   * The instance config is readable via the API, with all requested
    -     *     attributes. The instance config's
    +     *   * The instance configuration is readable via the API, with all requested
    +     *     attributes. The instance configuration's
          *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *     field is set to true. Its state is `CREATING`.
          * While the operation is pending:
    -     *   * Cancelling the operation renders the instance config immediately
    +     *   * Cancelling the operation renders the instance configuration immediately
          *     unreadable via the API.
          *   * Except for deleting the creating resource, all other attempts to modify
    -     *     the instance config are rejected.
    +     *     the instance configuration are rejected.
          * Upon completion of the returned operation:
          *   * Instances can be created using the instance configuration.
    -     *   * The instance config's
    +     *   * The instance configuration's
          *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *   field becomes false. Its state becomes `READY`.
          * The returned [long-running operation][google.longrunning.Operation] will
          * have a name of the format
          * `<instance_config_name>/operations/<operation_id>` and can be used to track
    -     * creation of the instance config. The
    +     * creation of the instance configuration. The
          * [metadata][google.longrunning.Operation.metadata] field type is
          * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
          * The [response][google.longrunning.Operation.response] field type is
    @@ -1144,13 +1190,13 @@ default void createInstanceConfig(
          *
          *
          * 
    -     * Updates an instance config. The returned
    +     * Updates an instance configuration. The returned
          * [long-running operation][google.longrunning.Operation] can be used to track
    -     * the progress of updating the instance. If the named instance config does
    -     * not exist, returns `NOT_FOUND`.
    -     * Only user managed configurations can be updated.
    +     * the progress of updating the instance. If the named instance configuration
    +     * does not exist, returns `NOT_FOUND`.
    +     * Only user-managed configurations can be updated.
          * Immediately after the request returns:
    -     *   * The instance config's
    +     *   * The instance configuration's
          *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *     field is set to true.
          * While the operation is pending:
    @@ -1158,20 +1204,20 @@ default void createInstanceConfig(
          *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
          *     The operation is guaranteed to succeed at undoing all changes, after
          *     which point it terminates with a `CANCELLED` status.
    -     *   * All other attempts to modify the instance config are rejected.
    -     *   * Reading the instance config via the API continues to give the
    +     *   * All other attempts to modify the instance configuration are rejected.
    +     *   * Reading the instance configuration via the API continues to give the
          *     pre-request values.
          * Upon completion of the returned operation:
          *   * Creating instances using the instance configuration uses the new
          *     values.
    -     *   * The instance config's new values are readable via the API.
    -     *   * The instance config's
    +     *   * The new values of the instance configuration are readable via the API.
    +     *   * The instance configuration's
          *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *   field becomes false.
          * The returned [long-running operation][google.longrunning.Operation] will
          * have a name of the format
          * `<instance_config_name>/operations/<operation_id>` and can be used to track
    -     * the instance config modification.  The
    +     * the instance configuration modification.  The
          * [metadata][google.longrunning.Operation.metadata] field type is
          * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
          * The [response][google.longrunning.Operation.response] field type is
    @@ -1192,10 +1238,10 @@ default void updateInstanceConfig(
          *
          *
          * 
    -     * Deletes the instance config. Deletion is only allowed when no
    +     * Deletes the instance configuration. Deletion is only allowed when no
          * instances are using the configuration. If any instances are using
    -     * the config, returns `FAILED_PRECONDITION`.
    -     * Only user managed configurations can be deleted.
    +     * the configuration, returns `FAILED_PRECONDITION`.
    +     * Only user-managed configurations can be deleted.
          * Authorization requires `spanner.instanceConfigs.delete` permission on
          * the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
          * 
    @@ -1211,9 +1257,9 @@ default void deleteInstanceConfig( * * *
    -     * Lists the user-managed instance config [long-running
    +     * Lists the user-managed instance configuration [long-running
          * operations][google.longrunning.Operation] in the given project. An instance
    -     * config operation has a name of the form
    +     * configuration operation has a name of the form
          * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`.
          * The long-running operation
          * [metadata][google.longrunning.Operation.metadata] field type
    @@ -1591,6 +1637,69 @@ default void listInstancePartitionOperations(
           io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
               getListInstancePartitionOperationsMethod(), responseObserver);
         }
    +
    +    /**
    +     *
    +     *
    +     * 
    +     * Moves an instance to the target instance configuration. You can use the
    +     * returned [long-running operation][google.longrunning.Operation] to track
    +     * the progress of moving the instance.
    +     * `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of
    +     * the following criteria:
    +     *   * Is undergoing a move to a different instance configuration
    +     *   * Has backups
    +     *   * Has an ongoing update
    +     *   * Contains any CMEK-enabled databases
    +     *   * Is a free trial instance
    +     * While the operation is pending:
    +     *   * All other attempts to modify the instance, including changes to its
    +     *     compute capacity, are rejected.
    +     *   * The following database and backup admin operations are rejected:
    +     *     * `DatabaseAdmin.CreateDatabase`
    +     *     * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is
    +     *        specified in the request.)
    +     *     * `DatabaseAdmin.RestoreDatabase`
    +     *     * `DatabaseAdmin.CreateBackup`
    +     *     * `DatabaseAdmin.CopyBackup`
    +     *   * Both the source and target instance configurations are subject to
    +     *     hourly compute and storage charges.
    +     *   * The instance might experience higher read-write latencies and a higher
    +     *     transaction abort rate. However, moving an instance doesn't cause any
    +     *     downtime.
    +     * The returned [long-running operation][google.longrunning.Operation] has
    +     * a name of the format
    +     * `<instance_name>/operations/<operation_id>` and can be used to track
    +     * the move instance operation. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance],
    +     * if successful.
    +     * Cancelling the operation sets its metadata's
    +     * [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time].
    +     * Cancellation is not immediate because it involves moving any data
    +     * previously moved to the target instance configuration back to the original
    +     * instance configuration. You can use this operation to track the progress of
    +     * the cancellation. Upon successful completion of the cancellation, the
    +     * operation terminates with `CANCELLED` status.
    +     * If not cancelled, upon completion of the returned operation:
    +     *   * The instance successfully moves to the target instance
    +     *     configuration.
    +     *   * You are billed for compute and storage in target instance
    +     *   configuration.
    +     * Authorization requires the `spanner.instances.update` permission on
    +     * the resource [instance][google.spanner.admin.instance.v1.Instance].
    +     * For more details, see
    +     * [Move an instance](https://cloud.google.com/spanner/docs/move-instance).
    +     * 
    + */ + default void moveInstance( + com.google.spanner.admin.instance.v1.MoveInstanceRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getMoveInstanceMethod(), responseObserver); + } } /** @@ -1700,31 +1809,31 @@ public void getInstanceConfig( * * *
    -     * Creates an instance config and begins preparing it to be used. The
    +     * Creates an instance configuration and begins preparing it to be used. The
          * returned [long-running operation][google.longrunning.Operation]
          * can be used to track the progress of preparing the new
    -     * instance config. The instance config name is assigned by the caller. If the
    -     * named instance config already exists, `CreateInstanceConfig` returns
    -     * `ALREADY_EXISTS`.
    +     * instance configuration. The instance configuration name is assigned by the
    +     * caller. If the named instance configuration already exists,
    +     * `CreateInstanceConfig` returns `ALREADY_EXISTS`.
          * Immediately after the request returns:
    -     *   * The instance config is readable via the API, with all requested
    -     *     attributes. The instance config's
    +     *   * The instance configuration is readable via the API, with all requested
    +     *     attributes. The instance configuration's
          *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *     field is set to true. Its state is `CREATING`.
          * While the operation is pending:
    -     *   * Cancelling the operation renders the instance config immediately
    +     *   * Cancelling the operation renders the instance configuration immediately
          *     unreadable via the API.
          *   * Except for deleting the creating resource, all other attempts to modify
    -     *     the instance config are rejected.
    +     *     the instance configuration are rejected.
          * Upon completion of the returned operation:
          *   * Instances can be created using the instance configuration.
    -     *   * The instance config's
    +     *   * The instance configuration's
          *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *   field becomes false. Its state becomes `READY`.
          * The returned [long-running operation][google.longrunning.Operation] will
          * have a name of the format
          * `<instance_config_name>/operations/<operation_id>` and can be used to track
    -     * creation of the instance config. The
    +     * creation of the instance configuration. The
          * [metadata][google.longrunning.Operation.metadata] field type is
          * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
          * The [response][google.longrunning.Operation.response] field type is
    @@ -1748,13 +1857,13 @@ public void createInstanceConfig(
          *
          *
          * 
    -     * Updates an instance config. The returned
    +     * Updates an instance configuration. The returned
          * [long-running operation][google.longrunning.Operation] can be used to track
    -     * the progress of updating the instance. If the named instance config does
    -     * not exist, returns `NOT_FOUND`.
    -     * Only user managed configurations can be updated.
    +     * the progress of updating the instance. If the named instance configuration
    +     * does not exist, returns `NOT_FOUND`.
    +     * Only user-managed configurations can be updated.
          * Immediately after the request returns:
    -     *   * The instance config's
    +     *   * The instance configuration's
          *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *     field is set to true.
          * While the operation is pending:
    @@ -1762,20 +1871,20 @@ public void createInstanceConfig(
          *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
          *     The operation is guaranteed to succeed at undoing all changes, after
          *     which point it terminates with a `CANCELLED` status.
    -     *   * All other attempts to modify the instance config are rejected.
    -     *   * Reading the instance config via the API continues to give the
    +     *   * All other attempts to modify the instance configuration are rejected.
    +     *   * Reading the instance configuration via the API continues to give the
          *     pre-request values.
          * Upon completion of the returned operation:
          *   * Creating instances using the instance configuration uses the new
          *     values.
    -     *   * The instance config's new values are readable via the API.
    -     *   * The instance config's
    +     *   * The new values of the instance configuration are readable via the API.
    +     *   * The instance configuration's
          *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *   field becomes false.
          * The returned [long-running operation][google.longrunning.Operation] will
          * have a name of the format
          * `<instance_config_name>/operations/<operation_id>` and can be used to track
    -     * the instance config modification.  The
    +     * the instance configuration modification.  The
          * [metadata][google.longrunning.Operation.metadata] field type is
          * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
          * The [response][google.longrunning.Operation.response] field type is
    @@ -1798,10 +1907,10 @@ public void updateInstanceConfig(
          *
          *
          * 
    -     * Deletes the instance config. Deletion is only allowed when no
    +     * Deletes the instance configuration. Deletion is only allowed when no
          * instances are using the configuration. If any instances are using
    -     * the config, returns `FAILED_PRECONDITION`.
    -     * Only user managed configurations can be deleted.
    +     * the configuration, returns `FAILED_PRECONDITION`.
    +     * Only user-managed configurations can be deleted.
          * Authorization requires `spanner.instanceConfigs.delete` permission on
          * the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
          * 
    @@ -1819,9 +1928,9 @@ public void deleteInstanceConfig( * * *
    -     * Lists the user-managed instance config [long-running
    +     * Lists the user-managed instance configuration [long-running
          * operations][google.longrunning.Operation] in the given project. An instance
    -     * config operation has a name of the form
    +     * configuration operation has a name of the form
          * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`.
          * The long-running operation
          * [metadata][google.longrunning.Operation.metadata] field type
    @@ -2229,6 +2338,71 @@ public void listInstancePartitionOperations(
               request,
               responseObserver);
         }
    +
    +    /**
    +     *
    +     *
    +     * 
    +     * Moves an instance to the target instance configuration. You can use the
    +     * returned [long-running operation][google.longrunning.Operation] to track
    +     * the progress of moving the instance.
    +     * `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of
    +     * the following criteria:
    +     *   * Is undergoing a move to a different instance configuration
    +     *   * Has backups
    +     *   * Has an ongoing update
    +     *   * Contains any CMEK-enabled databases
    +     *   * Is a free trial instance
    +     * While the operation is pending:
    +     *   * All other attempts to modify the instance, including changes to its
    +     *     compute capacity, are rejected.
    +     *   * The following database and backup admin operations are rejected:
    +     *     * `DatabaseAdmin.CreateDatabase`
    +     *     * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is
    +     *        specified in the request.)
    +     *     * `DatabaseAdmin.RestoreDatabase`
    +     *     * `DatabaseAdmin.CreateBackup`
    +     *     * `DatabaseAdmin.CopyBackup`
    +     *   * Both the source and target instance configurations are subject to
    +     *     hourly compute and storage charges.
    +     *   * The instance might experience higher read-write latencies and a higher
    +     *     transaction abort rate. However, moving an instance doesn't cause any
    +     *     downtime.
    +     * The returned [long-running operation][google.longrunning.Operation] has
    +     * a name of the format
    +     * `<instance_name>/operations/<operation_id>` and can be used to track
    +     * the move instance operation. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance],
    +     * if successful.
    +     * Cancelling the operation sets its metadata's
    +     * [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time].
    +     * Cancellation is not immediate because it involves moving any data
    +     * previously moved to the target instance configuration back to the original
    +     * instance configuration. You can use this operation to track the progress of
    +     * the cancellation. Upon successful completion of the cancellation, the
    +     * operation terminates with `CANCELLED` status.
    +     * If not cancelled, upon completion of the returned operation:
    +     *   * The instance successfully moves to the target instance
    +     *     configuration.
    +     *   * You are billed for compute and storage in target instance
    +     *   configuration.
    +     * Authorization requires the `spanner.instances.update` permission on
    +     * the resource [instance][google.spanner.admin.instance.v1.Instance].
    +     * For more details, see
    +     * [Move an instance](https://cloud.google.com/spanner/docs/move-instance).
    +     * 
    + */ + public void moveInstance( + com.google.spanner.admin.instance.v1.MoveInstanceRequest request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getMoveInstanceMethod(), getCallOptions()), + request, + responseObserver); + } } /** @@ -2297,31 +2471,31 @@ public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig( * * *
    -     * Creates an instance config and begins preparing it to be used. The
    +     * Creates an instance configuration and begins preparing it to be used. The
          * returned [long-running operation][google.longrunning.Operation]
          * can be used to track the progress of preparing the new
    -     * instance config. The instance config name is assigned by the caller. If the
    -     * named instance config already exists, `CreateInstanceConfig` returns
    -     * `ALREADY_EXISTS`.
    +     * instance configuration. The instance configuration name is assigned by the
    +     * caller. If the named instance configuration already exists,
    +     * `CreateInstanceConfig` returns `ALREADY_EXISTS`.
          * Immediately after the request returns:
    -     *   * The instance config is readable via the API, with all requested
    -     *     attributes. The instance config's
    +     *   * The instance configuration is readable via the API, with all requested
    +     *     attributes. The instance configuration's
          *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *     field is set to true. Its state is `CREATING`.
          * While the operation is pending:
    -     *   * Cancelling the operation renders the instance config immediately
    +     *   * Cancelling the operation renders the instance configuration immediately
          *     unreadable via the API.
          *   * Except for deleting the creating resource, all other attempts to modify
    -     *     the instance config are rejected.
    +     *     the instance configuration are rejected.
          * Upon completion of the returned operation:
          *   * Instances can be created using the instance configuration.
    -     *   * The instance config's
    +     *   * The instance configuration's
          *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *   field becomes false. Its state becomes `READY`.
          * The returned [long-running operation][google.longrunning.Operation] will
          * have a name of the format
          * `<instance_config_name>/operations/<operation_id>` and can be used to track
    -     * creation of the instance config. The
    +     * creation of the instance configuration. The
          * [metadata][google.longrunning.Operation.metadata] field type is
          * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
          * The [response][google.longrunning.Operation.response] field type is
    @@ -2342,13 +2516,13 @@ public com.google.longrunning.Operation createInstanceConfig(
          *
          *
          * 
    -     * Updates an instance config. The returned
    +     * Updates an instance configuration. The returned
          * [long-running operation][google.longrunning.Operation] can be used to track
    -     * the progress of updating the instance. If the named instance config does
    -     * not exist, returns `NOT_FOUND`.
    -     * Only user managed configurations can be updated.
    +     * the progress of updating the instance. If the named instance configuration
    +     * does not exist, returns `NOT_FOUND`.
    +     * Only user-managed configurations can be updated.
          * Immediately after the request returns:
    -     *   * The instance config's
    +     *   * The instance configuration's
          *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *     field is set to true.
          * While the operation is pending:
    @@ -2356,20 +2530,20 @@ public com.google.longrunning.Operation createInstanceConfig(
          *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
          *     The operation is guaranteed to succeed at undoing all changes, after
          *     which point it terminates with a `CANCELLED` status.
    -     *   * All other attempts to modify the instance config are rejected.
    -     *   * Reading the instance config via the API continues to give the
    +     *   * All other attempts to modify the instance configuration are rejected.
    +     *   * Reading the instance configuration via the API continues to give the
          *     pre-request values.
          * Upon completion of the returned operation:
          *   * Creating instances using the instance configuration uses the new
          *     values.
    -     *   * The instance config's new values are readable via the API.
    -     *   * The instance config's
    +     *   * The new values of the instance configuration are readable via the API.
    +     *   * The instance configuration's
          *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *   field becomes false.
          * The returned [long-running operation][google.longrunning.Operation] will
          * have a name of the format
          * `<instance_config_name>/operations/<operation_id>` and can be used to track
    -     * the instance config modification.  The
    +     * the instance configuration modification.  The
          * [metadata][google.longrunning.Operation.metadata] field type is
          * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
          * The [response][google.longrunning.Operation.response] field type is
    @@ -2389,10 +2563,10 @@ public com.google.longrunning.Operation updateInstanceConfig(
          *
          *
          * 
    -     * Deletes the instance config. Deletion is only allowed when no
    +     * Deletes the instance configuration. Deletion is only allowed when no
          * instances are using the configuration. If any instances are using
    -     * the config, returns `FAILED_PRECONDITION`.
    -     * Only user managed configurations can be deleted.
    +     * the configuration, returns `FAILED_PRECONDITION`.
    +     * Only user-managed configurations can be deleted.
          * Authorization requires `spanner.instanceConfigs.delete` permission on
          * the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
          * 
    @@ -2407,9 +2581,9 @@ public com.google.protobuf.Empty deleteInstanceConfig( * * *
    -     * Lists the user-managed instance config [long-running
    +     * Lists the user-managed instance configuration [long-running
          * operations][google.longrunning.Operation] in the given project. An instance
    -     * config operation has a name of the form
    +     * configuration operation has a name of the form
          * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`.
          * The long-running operation
          * [metadata][google.longrunning.Operation.metadata] field type
    @@ -2763,6 +2937,68 @@ public com.google.longrunning.Operation updateInstancePartition(
           return io.grpc.stub.ClientCalls.blockingUnaryCall(
               getChannel(), getListInstancePartitionOperationsMethod(), getCallOptions(), request);
         }
    +
    +    /**
    +     *
    +     *
    +     * 
    +     * Moves an instance to the target instance configuration. You can use the
    +     * returned [long-running operation][google.longrunning.Operation] to track
    +     * the progress of moving the instance.
    +     * `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of
    +     * the following criteria:
    +     *   * Is undergoing a move to a different instance configuration
    +     *   * Has backups
    +     *   * Has an ongoing update
    +     *   * Contains any CMEK-enabled databases
    +     *   * Is a free trial instance
    +     * While the operation is pending:
    +     *   * All other attempts to modify the instance, including changes to its
    +     *     compute capacity, are rejected.
    +     *   * The following database and backup admin operations are rejected:
    +     *     * `DatabaseAdmin.CreateDatabase`
    +     *     * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is
    +     *        specified in the request.)
    +     *     * `DatabaseAdmin.RestoreDatabase`
    +     *     * `DatabaseAdmin.CreateBackup`
    +     *     * `DatabaseAdmin.CopyBackup`
    +     *   * Both the source and target instance configurations are subject to
    +     *     hourly compute and storage charges.
    +     *   * The instance might experience higher read-write latencies and a higher
    +     *     transaction abort rate. However, moving an instance doesn't cause any
    +     *     downtime.
    +     * The returned [long-running operation][google.longrunning.Operation] has
    +     * a name of the format
    +     * `<instance_name>/operations/<operation_id>` and can be used to track
    +     * the move instance operation. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance],
    +     * if successful.
    +     * Cancelling the operation sets its metadata's
    +     * [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time].
    +     * Cancellation is not immediate because it involves moving any data
    +     * previously moved to the target instance configuration back to the original
    +     * instance configuration. You can use this operation to track the progress of
    +     * the cancellation. Upon successful completion of the cancellation, the
    +     * operation terminates with `CANCELLED` status.
    +     * If not cancelled, upon completion of the returned operation:
    +     *   * The instance successfully moves to the target instance
    +     *     configuration.
    +     *   * You are billed for compute and storage in target instance
    +     *   configuration.
    +     * Authorization requires the `spanner.instances.update` permission on
    +     * the resource [instance][google.spanner.admin.instance.v1.Instance].
    +     * For more details, see
    +     * [Move an instance](https://cloud.google.com/spanner/docs/move-instance).
    +     * 
    + */ + public com.google.longrunning.Operation moveInstance( + com.google.spanner.admin.instance.v1.MoveInstanceRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getMoveInstanceMethod(), getCallOptions(), request); + } } /** @@ -2834,31 +3070,31 @@ protected InstanceAdminFutureStub build( * * *
    -     * Creates an instance config and begins preparing it to be used. The
    +     * Creates an instance configuration and begins preparing it to be used. The
          * returned [long-running operation][google.longrunning.Operation]
          * can be used to track the progress of preparing the new
    -     * instance config. The instance config name is assigned by the caller. If the
    -     * named instance config already exists, `CreateInstanceConfig` returns
    -     * `ALREADY_EXISTS`.
    +     * instance configuration. The instance configuration name is assigned by the
    +     * caller. If the named instance configuration already exists,
    +     * `CreateInstanceConfig` returns `ALREADY_EXISTS`.
          * Immediately after the request returns:
    -     *   * The instance config is readable via the API, with all requested
    -     *     attributes. The instance config's
    +     *   * The instance configuration is readable via the API, with all requested
    +     *     attributes. The instance configuration's
          *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *     field is set to true. Its state is `CREATING`.
          * While the operation is pending:
    -     *   * Cancelling the operation renders the instance config immediately
    +     *   * Cancelling the operation renders the instance configuration immediately
          *     unreadable via the API.
          *   * Except for deleting the creating resource, all other attempts to modify
    -     *     the instance config are rejected.
    +     *     the instance configuration are rejected.
          * Upon completion of the returned operation:
          *   * Instances can be created using the instance configuration.
    -     *   * The instance config's
    +     *   * The instance configuration's
          *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *   field becomes false. Its state becomes `READY`.
          * The returned [long-running operation][google.longrunning.Operation] will
          * have a name of the format
          * `<instance_config_name>/operations/<operation_id>` and can be used to track
    -     * creation of the instance config. The
    +     * creation of the instance configuration. The
          * [metadata][google.longrunning.Operation.metadata] field type is
          * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
          * The [response][google.longrunning.Operation.response] field type is
    @@ -2880,13 +3116,13 @@ protected InstanceAdminFutureStub build(
          *
          *
          * 
    -     * Updates an instance config. The returned
    +     * Updates an instance configuration. The returned
          * [long-running operation][google.longrunning.Operation] can be used to track
    -     * the progress of updating the instance. If the named instance config does
    -     * not exist, returns `NOT_FOUND`.
    -     * Only user managed configurations can be updated.
    +     * the progress of updating the instance. If the named instance configuration
    +     * does not exist, returns `NOT_FOUND`.
    +     * Only user-managed configurations can be updated.
          * Immediately after the request returns:
    -     *   * The instance config's
    +     *   * The instance configuration's
          *     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *     field is set to true.
          * While the operation is pending:
    @@ -2894,20 +3130,20 @@ protected InstanceAdminFutureStub build(
          *     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
          *     The operation is guaranteed to succeed at undoing all changes, after
          *     which point it terminates with a `CANCELLED` status.
    -     *   * All other attempts to modify the instance config are rejected.
    -     *   * Reading the instance config via the API continues to give the
    +     *   * All other attempts to modify the instance configuration are rejected.
    +     *   * Reading the instance configuration via the API continues to give the
          *     pre-request values.
          * Upon completion of the returned operation:
          *   * Creating instances using the instance configuration uses the new
          *     values.
    -     *   * The instance config's new values are readable via the API.
    -     *   * The instance config's
    +     *   * The new values of the instance configuration are readable via the API.
    +     *   * The instance configuration's
          *   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
          *   field becomes false.
          * The returned [long-running operation][google.longrunning.Operation] will
          * have a name of the format
          * `<instance_config_name>/operations/<operation_id>` and can be used to track
    -     * the instance config modification.  The
    +     * the instance configuration modification.  The
          * [metadata][google.longrunning.Operation.metadata] field type is
          * [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
          * The [response][google.longrunning.Operation.response] field type is
    @@ -2928,10 +3164,10 @@ protected InstanceAdminFutureStub build(
          *
          *
          * 
    -     * Deletes the instance config. Deletion is only allowed when no
    +     * Deletes the instance configuration. Deletion is only allowed when no
          * instances are using the configuration. If any instances are using
    -     * the config, returns `FAILED_PRECONDITION`.
    -     * Only user managed configurations can be deleted.
    +     * the configuration, returns `FAILED_PRECONDITION`.
    +     * Only user-managed configurations can be deleted.
          * Authorization requires `spanner.instanceConfigs.delete` permission on
          * the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
          * 
    @@ -2947,9 +3183,9 @@ protected InstanceAdminFutureStub build( * * *
    -     * Lists the user-managed instance config [long-running
    +     * Lists the user-managed instance configuration [long-running
          * operations][google.longrunning.Operation] in the given project. An instance
    -     * config operation has a name of the form
    +     * configuration operation has a name of the form
          * `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`.
          * The long-running operation
          * [metadata][google.longrunning.Operation.metadata] field type
    @@ -3317,6 +3553,68 @@ protected InstanceAdminFutureStub build(
               getChannel().newCall(getListInstancePartitionOperationsMethod(), getCallOptions()),
               request);
         }
    +
    +    /**
    +     *
    +     *
    +     * 
    +     * Moves an instance to the target instance configuration. You can use the
    +     * returned [long-running operation][google.longrunning.Operation] to track
    +     * the progress of moving the instance.
    +     * `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of
    +     * the following criteria:
    +     *   * Is undergoing a move to a different instance configuration
    +     *   * Has backups
    +     *   * Has an ongoing update
    +     *   * Contains any CMEK-enabled databases
    +     *   * Is a free trial instance
    +     * While the operation is pending:
    +     *   * All other attempts to modify the instance, including changes to its
    +     *     compute capacity, are rejected.
    +     *   * The following database and backup admin operations are rejected:
    +     *     * `DatabaseAdmin.CreateDatabase`
    +     *     * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is
    +     *        specified in the request.)
    +     *     * `DatabaseAdmin.RestoreDatabase`
    +     *     * `DatabaseAdmin.CreateBackup`
    +     *     * `DatabaseAdmin.CopyBackup`
    +     *   * Both the source and target instance configurations are subject to
    +     *     hourly compute and storage charges.
    +     *   * The instance might experience higher read-write latencies and a higher
    +     *     transaction abort rate. However, moving an instance doesn't cause any
    +     *     downtime.
    +     * The returned [long-running operation][google.longrunning.Operation] has
    +     * a name of the format
    +     * `<instance_name>/operations/<operation_id>` and can be used to track
    +     * the move instance operation. The
    +     * [metadata][google.longrunning.Operation.metadata] field type is
    +     * [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata].
    +     * The [response][google.longrunning.Operation.response] field type is
    +     * [Instance][google.spanner.admin.instance.v1.Instance],
    +     * if successful.
    +     * Cancelling the operation sets its metadata's
    +     * [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time].
    +     * Cancellation is not immediate because it involves moving any data
    +     * previously moved to the target instance configuration back to the original
    +     * instance configuration. You can use this operation to track the progress of
    +     * the cancellation. Upon successful completion of the cancellation, the
    +     * operation terminates with `CANCELLED` status.
    +     * If not cancelled, upon completion of the returned operation:
    +     *   * The instance successfully moves to the target instance
    +     *     configuration.
    +     *   * You are billed for compute and storage in target instance
    +     *   configuration.
    +     * Authorization requires the `spanner.instances.update` permission on
    +     * the resource [instance][google.spanner.admin.instance.v1.Instance].
    +     * For more details, see
    +     * [Move an instance](https://cloud.google.com/spanner/docs/move-instance).
    +     * 
    + */ + public com.google.common.util.concurrent.ListenableFuture + moveInstance(com.google.spanner.admin.instance.v1.MoveInstanceRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getMoveInstanceMethod(), getCallOptions()), request); + } } private static final int METHODID_LIST_INSTANCE_CONFIGS = 0; @@ -3339,6 +3637,7 @@ protected InstanceAdminFutureStub build( private static final int METHODID_DELETE_INSTANCE_PARTITION = 17; private static final int METHODID_UPDATE_INSTANCE_PARTITION = 18; private static final int METHODID_LIST_INSTANCE_PARTITION_OPERATIONS = 19; + private static final int METHODID_MOVE_INSTANCE = 20; private static final class MethodHandlers implements io.grpc.stub.ServerCalls.UnaryMethod, @@ -3471,6 +3770,11 @@ public void invoke(Req request, io.grpc.stub.StreamObserver responseObserv com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse>) responseObserver); break; + case METHODID_MOVE_INSTANCE: + serviceImpl.moveInstance( + (com.google.spanner.admin.instance.v1.MoveInstanceRequest) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; default: throw new AssertionError(); } @@ -3615,6 +3919,12 @@ public static final io.grpc.ServerServiceDefinition bindService(AsyncService ser com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest, com.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse>( service, METHODID_LIST_INSTANCE_PARTITION_OPERATIONS))) + .addMethod( + getMoveInstanceMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.instance.v1.MoveInstanceRequest, + com.google.longrunning.Operation>(service, METHODID_MOVE_INSTANCE))) .build(); } @@ -3686,6 +3996,7 @@ public static io.grpc.ServiceDescriptor getServiceDescriptor() { .addMethod(getDeleteInstancePartitionMethod()) .addMethod(getUpdateInstancePartitionMethod()) .addMethod(getListInstancePartitionOperationsMethod()) + 
.addMethod(getMoveInstanceMethod()) .build(); } } diff --git a/grpc-google-cloud-spanner-executor-v1/pom.xml b/grpc-google-cloud-spanner-executor-v1/pom.xml index 2ec300ac11e..f220bce4dcc 100644 --- a/grpc-google-cloud-spanner-executor-v1/pom.xml +++ b/grpc-google-cloud-spanner-executor-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-spanner-executor-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT grpc-google-cloud-spanner-executor-v1 GRPC library for google-cloud-spanner com.google.cloud google-cloud-spanner-parent - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT diff --git a/grpc-google-cloud-spanner-v1/pom.xml b/grpc-google-cloud-spanner-v1/pom.xml index 971c5d46129..d2e4d22889b 100644 --- a/grpc-google-cloud-spanner-v1/pom.xml +++ b/grpc-google-cloud-spanner-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-spanner-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT grpc-google-cloud-spanner-v1 GRPC library for grpc-google-cloud-spanner-v1 com.google.cloud google-cloud-spanner-parent - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT diff --git a/pom.xml b/pom.xml index b17ff06e937..62805706799 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ com.google.cloud google-cloud-spanner-parent pom - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT Google Cloud Spanner Parent https://github.com/googleapis/java-spanner @@ -14,7 +14,7 @@ com.google.cloud sdk-platform-java-config - 3.30.0 + 3.37.0 @@ -61,47 +61,47 @@ com.google.api.grpc proto-google-cloud-spanner-admin-instance-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT com.google.api.grpc proto-google-cloud-spanner-executor-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT com.google.api.grpc grpc-google-cloud-spanner-executor-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT com.google.api.grpc proto-google-cloud-spanner-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT com.google.api.grpc proto-google-cloud-spanner-admin-database-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT com.google.api.grpc grpc-google-cloud-spanner-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT 
com.google.api.grpc grpc-google-cloud-spanner-admin-instance-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT com.google.api.grpc grpc-google-cloud-spanner-admin-database-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT com.google.cloud google-cloud-spanner - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT @@ -121,7 +121,7 @@ com.google.truth truth - 1.4.2 + 1.4.4 test @@ -171,7 +171,7 @@ org.apache.maven.plugins maven-project-info-reports-plugin - 3.5.0 + 3.7.0 diff --git a/proto-google-cloud-spanner-admin-database-v1/clirr-ignored-differences.xml b/proto-google-cloud-spanner-admin-database-v1/clirr-ignored-differences.xml index c1e8fc9b84a..3799fb341ac 100644 --- a/proto-google-cloud-spanner-admin-database-v1/clirr-ignored-differences.xml +++ b/proto-google-cloud-spanner-admin-database-v1/clirr-ignored-differences.xml @@ -16,4 +16,66 @@ com/google/spanner/admin/database/v1/*OrBuilder boolean has*(*) + + + + 7006 + com/google/spanner/admin/database/v1/** + * getDefaultInstanceForType() + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * addRepeatedField(*) + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * clear() + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * clearField(*) + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * clearOneof(*) + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * clone() + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * mergeUnknownFields(*) + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * setField(*) + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * setRepeatedField(*) + ** + + + 7006 + com/google/spanner/admin/database/v1/** + * setUnknownFields(*) + ** + diff --git a/proto-google-cloud-spanner-admin-database-v1/pom.xml b/proto-google-cloud-spanner-admin-database-v1/pom.xml index 06edeb77672..ea176b8be35 100644 --- a/proto-google-cloud-spanner-admin-database-v1/pom.xml +++ b/proto-google-cloud-spanner-admin-database-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc 
proto-google-cloud-spanner-admin-database-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT proto-google-cloud-spanner-admin-database-v1 PROTO library for proto-google-cloud-spanner-admin-database-v1 com.google.cloud google-cloud-spanner-parent - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/Backup.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/Backup.java index 03fe9603fd2..bbcb39c5f93 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/Backup.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/Backup.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -43,8 +43,11 @@ private Backup() { name_ = ""; state_ = 0; referencingDatabases_ = com.google.protobuf.LazyStringArrayList.emptyList(); + encryptionInformation_ = java.util.Collections.emptyList(); databaseDialect_ = 0; referencingBackups_ = com.google.protobuf.LazyStringArrayList.emptyList(); + backupSchedules_ = com.google.protobuf.LazyStringArrayList.emptyList(); + incrementalBackupChainId_ = ""; } @java.lang.Override @@ -236,10 +239,10 @@ private State(int value) { * * *
    -   * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -   * Name of the database from which this backup was
    -   * created. This needs to be in the same instance as the backup.
    -   * Values are of the form
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Name of the database from which this backup was created. This
    +   * needs to be in the same instance as the backup. Values are of the form
        * `projects/<project>/instances/<instance>/databases/<database>`.
        * 
    * @@ -263,10 +266,10 @@ public java.lang.String getDatabase() { * * *
    -   * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -   * Name of the database from which this backup was
    -   * created. This needs to be in the same instance as the backup.
    -   * Values are of the form
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Name of the database from which this backup was created. This
    +   * needs to be in the same instance as the backup. Values are of the form
        * `projects/<project>/instances/<instance>/databases/<database>`.
        * 
    * @@ -348,7 +351,8 @@ public com.google.protobuf.TimestampOrBuilder getVersionTimeOrBuilder() { * * *
    -   * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
        * operation. The expiration time of the backup, with microseconds
        * granularity that must be at least 6 hours and at most 366 days
        * from the time the CreateBackup request is processed. Once the `expire_time`
    @@ -368,7 +372,8 @@ public boolean hasExpireTime() {
        *
        *
        * 
    -   * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
        * operation. The expiration time of the backup, with microseconds
        * granularity that must be at least 6 hours and at most 366 days
        * from the time the CreateBackup request is processed. Once the `expire_time`
    @@ -388,7 +393,8 @@ public com.google.protobuf.Timestamp getExpireTime() {
        *
        *
        * 
    -   * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
        * operation. The expiration time of the backup, with microseconds
        * granularity that must be at least 6 hours and at most 366 days
        * from the time the CreateBackup request is processed. Once the `expire_time`
    @@ -411,8 +417,11 @@ public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() {
        *
        *
        * 
    -   * Output only for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -   * Required for the [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] operation.
    +   * Output only for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Required for the
    +   * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +   * operation.
        *
        * A globally unique identifier for the backup which cannot be
        * changed. Values are of the form
    @@ -446,8 +455,11 @@ public java.lang.String getName() {
        *
        *
        * 
    -   * Output only for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -   * Required for the [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] operation.
    +   * Output only for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Required for the
    +   * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +   * operation.
        *
        * A globally unique identifier for the backup which cannot be
        * changed. Values are of the form
    @@ -484,7 +496,8 @@ public com.google.protobuf.ByteString getNameBytes() {
        *
        *
        * 
    -   * Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * Output only. The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
        * request is received. If the request does not specify `version_time`, the
        * `version_time` of the backup will be equivalent to the `create_time`.
        * 
    @@ -502,7 +515,8 @@ public boolean hasCreateTime() { * * *
    -   * Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * Output only. The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
        * request is received. If the request does not specify `version_time`, the
        * `version_time` of the backup will be equivalent to the `create_time`.
        * 
    @@ -520,7 +534,8 @@ public com.google.protobuf.Timestamp getCreateTime() { * * *
    -   * Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * Output only. The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
        * request is received. If the request does not specify `version_time`, the
        * `version_time` of the backup will be equivalent to the `create_time`.
        * 
    @@ -551,6 +566,54 @@ public long getSizeBytes() { return sizeBytes_; } + public static final int FREEABLE_SIZE_BYTES_FIELD_NUMBER = 15; + private long freeableSizeBytes_ = 0L; + /** + * + * + *
    +   * Output only. The number of bytes that will be freed by deleting this
    +   * backup. This value will be zero if, for example, this backup is part of an
    +   * incremental backup chain and younger backups in the chain require that we
    +   * keep its data. For backups not in an incremental backup chain, this is
    +   * always the size of the backup. This value may change if backups on the same
    +   * chain get created, deleted or expired.
    +   * 
    + * + * int64 freeable_size_bytes = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The freeableSizeBytes. + */ + @java.lang.Override + public long getFreeableSizeBytes() { + return freeableSizeBytes_; + } + + public static final int EXCLUSIVE_SIZE_BYTES_FIELD_NUMBER = 16; + private long exclusiveSizeBytes_ = 0L; + /** + * + * + *
    +   * Output only. For a backup in an incremental backup chain, this is the
    +   * storage space needed to keep the data that has changed since the previous
    +   * backup. For all other backups, this is always the size of the backup. This
    +   * value may change if backups on the same chain get deleted or expired.
    +   *
    +   * This field can be used to calculate the total storage space used by a set
    +   * of backups. For example, the total space used by all backups of a database
    +   * can be computed by summing up this field.
    +   * 
    + * + * int64 exclusive_size_bytes = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The exclusiveSizeBytes. + */ + @java.lang.Override + public long getExclusiveSizeBytes() { + return exclusiveSizeBytes_; + } + public static final int STATE_FIELD_NUMBER = 6; private int state_ = 0; /** @@ -742,6 +805,115 @@ public com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptio : encryptionInfo_; } + public static final int ENCRYPTION_INFORMATION_FIELD_NUMBER = 13; + + @SuppressWarnings("serial") + private java.util.List + encryptionInformation_; + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getEncryptionInformationList() { + return encryptionInformation_; + } + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getEncryptionInformationOrBuilderList() { + return encryptionInformation_; + } + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getEncryptionInformationCount() { + return encryptionInformation_.size(); + } + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionInfo getEncryptionInformation(int index) { + return encryptionInformation_.get(index); + } + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder + getEncryptionInformationOrBuilder(int index) { + return encryptionInformation_.get(index); + } + public static final int DATABASE_DIALECT_FIELD_NUMBER = 10; private int databaseDialect_ = 0; /** @@ -947,6 +1119,242 @@ public com.google.protobuf.TimestampOrBuilder getMaxExpireTimeOrBuilder() { : maxExpireTime_; } + public static final int BACKUP_SCHEDULES_FIELD_NUMBER = 14; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList backupSchedules_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the backupSchedules. + */ + public com.google.protobuf.ProtocolStringList getBackupSchedulesList() { + return backupSchedules_; + } + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The count of backupSchedules. + */ + public int getBackupSchedulesCount() { + return backupSchedules_.size(); + } + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The backupSchedules at the given index. + */ + public java.lang.String getBackupSchedules(int index) { + return backupSchedules_.get(index); + } + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the backupSchedules at the given index. + */ + public com.google.protobuf.ByteString getBackupSchedulesBytes(int index) { + return backupSchedules_.getByteString(index); + } + + public static final int INCREMENTAL_BACKUP_CHAIN_ID_FIELD_NUMBER = 17; + + @SuppressWarnings("serial") + private volatile java.lang.Object incrementalBackupChainId_ = ""; + /** + * + * + *
    +   * Output only. Populated only for backups in an incremental backup chain.
    +   * Backups share the same chain id if and only if they belong to the same
    +   * incremental backup chain. Use this field to determine which backups are
    +   * part of the same incremental backup chain. The ordering of backups in the
    +   * chain can be determined by ordering the backup `version_time`.
    +   * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The incrementalBackupChainId. + */ + @java.lang.Override + public java.lang.String getIncrementalBackupChainId() { + java.lang.Object ref = incrementalBackupChainId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + incrementalBackupChainId_ = s; + return s; + } + } + /** + * + * + *
    +   * Output only. Populated only for backups in an incremental backup chain.
    +   * Backups share the same chain id if and only if they belong to the same
    +   * incremental backup chain. Use this field to determine which backups are
    +   * part of the same incremental backup chain. The ordering of backups in the
    +   * chain can be determined by ordering the backup `version_time`.
    +   * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The bytes for incrementalBackupChainId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getIncrementalBackupChainIdBytes() { + java.lang.Object ref = incrementalBackupChainId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + incrementalBackupChainId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OLDEST_VERSION_TIME_FIELD_NUMBER = 18; + private com.google.protobuf.Timestamp oldestVersionTime_; + /** + * + * + *
    +   * Output only. Data deleted at a time older than this is guaranteed not to be
    +   * retained in order to support this backup. For a backup in an incremental
    +   * backup chain, this is the version time of the oldest backup that exists or
    +   * ever existed in the chain. For all other backups, this is the version time
    +   * of the backup. This field can be used to understand what data is being
    +   * retained by the backup system.
    +   * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the oldestVersionTime field is set. + */ + @java.lang.Override + public boolean hasOldestVersionTime() { + return ((bitField0_ & 0x00000020) != 0); + } + /** + * + * + *
    +   * Output only. Data deleted at a time older than this is guaranteed not to be
    +   * retained in order to support this backup. For a backup in an incremental
    +   * backup chain, this is the version time of the oldest backup that exists or
    +   * ever existed in the chain. For all other backups, this is the version time
    +   * of the backup. This field can be used to understand what data is being
    +   * retained by the backup system.
    +   * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The oldestVersionTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getOldestVersionTime() { + return oldestVersionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : oldestVersionTime_; + } + /** + * + * + *
    +   * Output only. Data deleted at a time older than this is guaranteed not to be
    +   * retained in order to support this backup. For a backup in an incremental
    +   * backup chain, this is the version time of the oldest backup that exists or
    +   * ever existed in the chain. For all other backups, this is the version time
    +   * of the backup. This field can be used to understand what data is being
    +   * retained by the backup system.
    +   * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getOldestVersionTimeOrBuilder() { + return oldestVersionTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : oldestVersionTime_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -1000,6 +1408,24 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (((bitField0_ & 0x00000010) != 0)) { output.writeMessage(12, getMaxExpireTime()); } + for (int i = 0; i < encryptionInformation_.size(); i++) { + output.writeMessage(13, encryptionInformation_.get(i)); + } + for (int i = 0; i < backupSchedules_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 14, backupSchedules_.getRaw(i)); + } + if (freeableSizeBytes_ != 0L) { + output.writeInt64(15, freeableSizeBytes_); + } + if (exclusiveSizeBytes_ != 0L) { + output.writeInt64(16, exclusiveSizeBytes_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(incrementalBackupChainId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 17, incrementalBackupChainId_); + } + if (((bitField0_ & 0x00000020) != 0)) { + output.writeMessage(18, getOldestVersionTime()); + } getUnknownFields().writeTo(output); } @@ -1057,6 +1483,32 @@ public int getSerializedSize() { if (((bitField0_ & 0x00000010) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(12, getMaxExpireTime()); } + for (int i = 0; i < encryptionInformation_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 13, encryptionInformation_.get(i)); + } + { + int dataSize = 0; + for (int i = 0; i < backupSchedules_.size(); i++) { + dataSize += computeStringSizeNoTag(backupSchedules_.getRaw(i)); + } + size += dataSize; + size += 1 * getBackupSchedulesList().size(); + } + if (freeableSizeBytes_ != 0L) { + size += 
com.google.protobuf.CodedOutputStream.computeInt64Size(15, freeableSizeBytes_); + } + if (exclusiveSizeBytes_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(16, exclusiveSizeBytes_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(incrementalBackupChainId_)) { + size += + com.google.protobuf.GeneratedMessageV3.computeStringSize(17, incrementalBackupChainId_); + } + if (((bitField0_ & 0x00000020) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(18, getOldestVersionTime()); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -1088,18 +1540,27 @@ public boolean equals(final java.lang.Object obj) { if (!getCreateTime().equals(other.getCreateTime())) return false; } if (getSizeBytes() != other.getSizeBytes()) return false; + if (getFreeableSizeBytes() != other.getFreeableSizeBytes()) return false; + if (getExclusiveSizeBytes() != other.getExclusiveSizeBytes()) return false; if (state_ != other.state_) return false; if (!getReferencingDatabasesList().equals(other.getReferencingDatabasesList())) return false; if (hasEncryptionInfo() != other.hasEncryptionInfo()) return false; if (hasEncryptionInfo()) { if (!getEncryptionInfo().equals(other.getEncryptionInfo())) return false; } + if (!getEncryptionInformationList().equals(other.getEncryptionInformationList())) return false; if (databaseDialect_ != other.databaseDialect_) return false; if (!getReferencingBackupsList().equals(other.getReferencingBackupsList())) return false; if (hasMaxExpireTime() != other.hasMaxExpireTime()) return false; if (hasMaxExpireTime()) { if (!getMaxExpireTime().equals(other.getMaxExpireTime())) return false; } + if (!getBackupSchedulesList().equals(other.getBackupSchedulesList())) return false; + if (!getIncrementalBackupChainId().equals(other.getIncrementalBackupChainId())) return false; + if (hasOldestVersionTime() != other.hasOldestVersionTime()) return false; + if (hasOldestVersionTime()) 
{ + if (!getOldestVersionTime().equals(other.getOldestVersionTime())) return false; + } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -1129,6 +1590,10 @@ public int hashCode() { } hash = (37 * hash) + SIZE_BYTES_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getSizeBytes()); + hash = (37 * hash) + FREEABLE_SIZE_BYTES_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getFreeableSizeBytes()); + hash = (37 * hash) + EXCLUSIVE_SIZE_BYTES_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getExclusiveSizeBytes()); hash = (37 * hash) + STATE_FIELD_NUMBER; hash = (53 * hash) + state_; if (getReferencingDatabasesCount() > 0) { @@ -1139,6 +1604,10 @@ public int hashCode() { hash = (37 * hash) + ENCRYPTION_INFO_FIELD_NUMBER; hash = (53 * hash) + getEncryptionInfo().hashCode(); } + if (getEncryptionInformationCount() > 0) { + hash = (37 * hash) + ENCRYPTION_INFORMATION_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionInformationList().hashCode(); + } hash = (37 * hash) + DATABASE_DIALECT_FIELD_NUMBER; hash = (53 * hash) + databaseDialect_; if (getReferencingBackupsCount() > 0) { @@ -1149,6 +1618,16 @@ public int hashCode() { hash = (37 * hash) + MAX_EXPIRE_TIME_FIELD_NUMBER; hash = (53 * hash) + getMaxExpireTime().hashCode(); } + if (getBackupSchedulesCount() > 0) { + hash = (37 * hash) + BACKUP_SCHEDULES_FIELD_NUMBER; + hash = (53 * hash) + getBackupSchedulesList().hashCode(); + } + hash = (37 * hash) + INCREMENTAL_BACKUP_CHAIN_ID_FIELD_NUMBER; + hash = (53 * hash) + getIncrementalBackupChainId().hashCode(); + if (hasOldestVersionTime()) { + hash = (37 * hash) + OLDEST_VERSION_TIME_FIELD_NUMBER; + hash = (53 * hash) + getOldestVersionTime().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -1293,7 +1772,9 @@ private void maybeForceBuilderInitialization() { getExpireTimeFieldBuilder(); 
getCreateTimeFieldBuilder(); getEncryptionInfoFieldBuilder(); + getEncryptionInformationFieldBuilder(); getMaxExpireTimeFieldBuilder(); + getOldestVersionTimeFieldBuilder(); } } @@ -1319,6 +1800,8 @@ public Builder clear() { createTimeBuilder_ = null; } sizeBytes_ = 0L; + freeableSizeBytes_ = 0L; + exclusiveSizeBytes_ = 0L; state_ = 0; referencingDatabases_ = com.google.protobuf.LazyStringArrayList.emptyList(); encryptionInfo_ = null; @@ -1326,6 +1809,13 @@ public Builder clear() { encryptionInfoBuilder_.dispose(); encryptionInfoBuilder_ = null; } + if (encryptionInformationBuilder_ == null) { + encryptionInformation_ = java.util.Collections.emptyList(); + } else { + encryptionInformation_ = null; + encryptionInformationBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000800); databaseDialect_ = 0; referencingBackups_ = com.google.protobuf.LazyStringArrayList.emptyList(); maxExpireTime_ = null; @@ -1333,6 +1823,13 @@ public Builder clear() { maxExpireTimeBuilder_.dispose(); maxExpireTimeBuilder_ = null; } + backupSchedules_ = com.google.protobuf.LazyStringArrayList.emptyList(); + incrementalBackupChainId_ = ""; + oldestVersionTime_ = null; + if (oldestVersionTimeBuilder_ != null) { + oldestVersionTimeBuilder_.dispose(); + oldestVersionTimeBuilder_ = null; + } return this; } @@ -1360,6 +1857,7 @@ public com.google.spanner.admin.database.v1.Backup build() { public com.google.spanner.admin.database.v1.Backup buildPartial() { com.google.spanner.admin.database.v1.Backup result = new com.google.spanner.admin.database.v1.Backup(this); + buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } @@ -1367,6 +1865,18 @@ public com.google.spanner.admin.database.v1.Backup buildPartial() { return result; } + private void buildPartialRepeatedFields(com.google.spanner.admin.database.v1.Backup result) { + if (encryptionInformationBuilder_ == null) { + if (((bitField0_ & 0x00000800) != 0)) { + encryptionInformation_ = 
java.util.Collections.unmodifiableList(encryptionInformation_); + bitField0_ = (bitField0_ & ~0x00000800); + } + result.encryptionInformation_ = encryptionInformation_; + } else { + result.encryptionInformation_ = encryptionInformationBuilder_.build(); + } + } + private void buildPartial0(com.google.spanner.admin.database.v1.Backup result) { int from_bitField0_ = bitField0_; if (((from_bitField0_ & 0x00000001) != 0)) { @@ -1393,29 +1903,49 @@ private void buildPartial0(com.google.spanner.admin.database.v1.Backup result) { result.sizeBytes_ = sizeBytes_; } if (((from_bitField0_ & 0x00000040) != 0)) { - result.state_ = state_; + result.freeableSizeBytes_ = freeableSizeBytes_; } if (((from_bitField0_ & 0x00000080) != 0)) { + result.exclusiveSizeBytes_ = exclusiveSizeBytes_; + } + if (((from_bitField0_ & 0x00000100) != 0)) { + result.state_ = state_; + } + if (((from_bitField0_ & 0x00000200) != 0)) { referencingDatabases_.makeImmutable(); result.referencingDatabases_ = referencingDatabases_; } - if (((from_bitField0_ & 0x00000100) != 0)) { + if (((from_bitField0_ & 0x00000400) != 0)) { result.encryptionInfo_ = encryptionInfoBuilder_ == null ? encryptionInfo_ : encryptionInfoBuilder_.build(); to_bitField0_ |= 0x00000008; } - if (((from_bitField0_ & 0x00000200) != 0)) { + if (((from_bitField0_ & 0x00001000) != 0)) { result.databaseDialect_ = databaseDialect_; } - if (((from_bitField0_ & 0x00000400) != 0)) { + if (((from_bitField0_ & 0x00002000) != 0)) { referencingBackups_.makeImmutable(); result.referencingBackups_ = referencingBackups_; } - if (((from_bitField0_ & 0x00000800) != 0)) { + if (((from_bitField0_ & 0x00004000) != 0)) { result.maxExpireTime_ = maxExpireTimeBuilder_ == null ? 
maxExpireTime_ : maxExpireTimeBuilder_.build(); to_bitField0_ |= 0x00000010; } + if (((from_bitField0_ & 0x00008000) != 0)) { + backupSchedules_.makeImmutable(); + result.backupSchedules_ = backupSchedules_; + } + if (((from_bitField0_ & 0x00010000) != 0)) { + result.incrementalBackupChainId_ = incrementalBackupChainId_; + } + if (((from_bitField0_ & 0x00020000) != 0)) { + result.oldestVersionTime_ = + oldestVersionTimeBuilder_ == null + ? oldestVersionTime_ + : oldestVersionTimeBuilder_.build(); + to_bitField0_ |= 0x00000020; + } result.bitField0_ |= to_bitField0_; } @@ -1486,13 +2016,19 @@ public Builder mergeFrom(com.google.spanner.admin.database.v1.Backup other) { if (other.getSizeBytes() != 0L) { setSizeBytes(other.getSizeBytes()); } + if (other.getFreeableSizeBytes() != 0L) { + setFreeableSizeBytes(other.getFreeableSizeBytes()); + } + if (other.getExclusiveSizeBytes() != 0L) { + setExclusiveSizeBytes(other.getExclusiveSizeBytes()); + } if (other.state_ != 0) { setStateValue(other.getStateValue()); } if (!other.referencingDatabases_.isEmpty()) { if (referencingDatabases_.isEmpty()) { referencingDatabases_ = other.referencingDatabases_; - bitField0_ |= 0x00000080; + bitField0_ |= 0x00000200; } else { ensureReferencingDatabasesIsMutable(); referencingDatabases_.addAll(other.referencingDatabases_); @@ -1502,13 +2038,40 @@ public Builder mergeFrom(com.google.spanner.admin.database.v1.Backup other) { if (other.hasEncryptionInfo()) { mergeEncryptionInfo(other.getEncryptionInfo()); } - if (other.databaseDialect_ != 0) { - setDatabaseDialectValue(other.getDatabaseDialectValue()); - } - if (!other.referencingBackups_.isEmpty()) { - if (referencingBackups_.isEmpty()) { + if (encryptionInformationBuilder_ == null) { + if (!other.encryptionInformation_.isEmpty()) { + if (encryptionInformation_.isEmpty()) { + encryptionInformation_ = other.encryptionInformation_; + bitField0_ = (bitField0_ & ~0x00000800); + } else { + ensureEncryptionInformationIsMutable(); + 
encryptionInformation_.addAll(other.encryptionInformation_); + } + onChanged(); + } + } else { + if (!other.encryptionInformation_.isEmpty()) { + if (encryptionInformationBuilder_.isEmpty()) { + encryptionInformationBuilder_.dispose(); + encryptionInformationBuilder_ = null; + encryptionInformation_ = other.encryptionInformation_; + bitField0_ = (bitField0_ & ~0x00000800); + encryptionInformationBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getEncryptionInformationFieldBuilder() + : null; + } else { + encryptionInformationBuilder_.addAllMessages(other.encryptionInformation_); + } + } + } + if (other.databaseDialect_ != 0) { + setDatabaseDialectValue(other.getDatabaseDialectValue()); + } + if (!other.referencingBackups_.isEmpty()) { + if (referencingBackups_.isEmpty()) { referencingBackups_ = other.referencingBackups_; - bitField0_ |= 0x00000400; + bitField0_ |= 0x00002000; } else { ensureReferencingBackupsIsMutable(); referencingBackups_.addAll(other.referencingBackups_); @@ -1518,6 +2081,24 @@ public Builder mergeFrom(com.google.spanner.admin.database.v1.Backup other) { if (other.hasMaxExpireTime()) { mergeMaxExpireTime(other.getMaxExpireTime()); } + if (!other.backupSchedules_.isEmpty()) { + if (backupSchedules_.isEmpty()) { + backupSchedules_ = other.backupSchedules_; + bitField0_ |= 0x00008000; + } else { + ensureBackupSchedulesIsMutable(); + backupSchedules_.addAll(other.backupSchedules_); + } + onChanged(); + } + if (!other.getIncrementalBackupChainId().isEmpty()) { + incrementalBackupChainId_ = other.incrementalBackupChainId_; + bitField0_ |= 0x00010000; + onChanged(); + } + if (other.hasOldestVersionTime()) { + mergeOldestVersionTime(other.getOldestVersionTime()); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -1577,7 +2158,7 @@ public Builder mergeFrom( case 48: { state_ = input.readEnum(); - bitField0_ |= 0x00000040; + bitField0_ |= 0x00000100; break; } // case 48 case 58: @@ -1590,7 
+2171,7 @@ public Builder mergeFrom( case 66: { input.readMessage(getEncryptionInfoFieldBuilder().getBuilder(), extensionRegistry); - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000400; break; } // case 66 case 74: @@ -1602,7 +2183,7 @@ public Builder mergeFrom( case 80: { databaseDialect_ = input.readEnum(); - bitField0_ |= 0x00000200; + bitField0_ |= 0x00001000; break; } // case 80 case 90: @@ -1615,9 +2196,55 @@ public Builder mergeFrom( case 98: { input.readMessage(getMaxExpireTimeFieldBuilder().getBuilder(), extensionRegistry); - bitField0_ |= 0x00000800; + bitField0_ |= 0x00004000; break; } // case 98 + case 106: + { + com.google.spanner.admin.database.v1.EncryptionInfo m = + input.readMessage( + com.google.spanner.admin.database.v1.EncryptionInfo.parser(), + extensionRegistry); + if (encryptionInformationBuilder_ == null) { + ensureEncryptionInformationIsMutable(); + encryptionInformation_.add(m); + } else { + encryptionInformationBuilder_.addMessage(m); + } + break; + } // case 106 + case 114: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureBackupSchedulesIsMutable(); + backupSchedules_.add(s); + break; + } // case 114 + case 120: + { + freeableSizeBytes_ = input.readInt64(); + bitField0_ |= 0x00000040; + break; + } // case 120 + case 128: + { + exclusiveSizeBytes_ = input.readInt64(); + bitField0_ |= 0x00000080; + break; + } // case 128 + case 138: + { + incrementalBackupChainId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00010000; + break; + } // case 138 + case 146: + { + input.readMessage( + getOldestVersionTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00020000; + break; + } // case 146 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -1642,10 +2269,10 @@ public Builder mergeFrom( * * *
    -     * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -     * Name of the database from which this backup was
    -     * created. This needs to be in the same instance as the backup.
    -     * Values are of the form
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Name of the database from which this backup was created. This
    +     * needs to be in the same instance as the backup. Values are of the form
          * `projects/<project>/instances/<instance>/databases/<database>`.
          * 
    * @@ -1668,10 +2295,10 @@ public java.lang.String getDatabase() { * * *
    -     * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -     * Name of the database from which this backup was
    -     * created. This needs to be in the same instance as the backup.
    -     * Values are of the form
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Name of the database from which this backup was created. This
    +     * needs to be in the same instance as the backup. Values are of the form
          * `projects/<project>/instances/<instance>/databases/<database>`.
          * 
    * @@ -1694,10 +2321,10 @@ public com.google.protobuf.ByteString getDatabaseBytes() { * * *
    -     * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -     * Name of the database from which this backup was
    -     * created. This needs to be in the same instance as the backup.
    -     * Values are of the form
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Name of the database from which this backup was created. This
    +     * needs to be in the same instance as the backup. Values are of the form
          * `projects/<project>/instances/<instance>/databases/<database>`.
          * 
    * @@ -1719,10 +2346,10 @@ public Builder setDatabase(java.lang.String value) { * * *
    -     * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -     * Name of the database from which this backup was
    -     * created. This needs to be in the same instance as the backup.
    -     * Values are of the form
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Name of the database from which this backup was created. This
    +     * needs to be in the same instance as the backup. Values are of the form
          * `projects/<project>/instances/<instance>/databases/<database>`.
          * 
    * @@ -1740,10 +2367,10 @@ public Builder clearDatabase() { * * *
    -     * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -     * Name of the database from which this backup was
    -     * created. This needs to be in the same instance as the backup.
    -     * Values are of the form
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Name of the database from which this backup was created. This
    +     * needs to be in the same instance as the backup. Values are of the form
          * `projects/<project>/instances/<instance>/databases/<database>`.
          * 
    * @@ -1985,7 +2612,8 @@ public com.google.protobuf.TimestampOrBuilder getVersionTimeOrBuilder() { * * *
    -     * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * operation. The expiration time of the backup, with microseconds
          * granularity that must be at least 6 hours and at most 366 days
          * from the time the CreateBackup request is processed. Once the `expire_time`
    @@ -2004,7 +2632,8 @@ public boolean hasExpireTime() {
          *
          *
          * 
    -     * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * operation. The expiration time of the backup, with microseconds
          * granularity that must be at least 6 hours and at most 366 days
          * from the time the CreateBackup request is processed. Once the `expire_time`
    @@ -2029,7 +2658,8 @@ public com.google.protobuf.Timestamp getExpireTime() {
          *
          *
          * 
    -     * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * operation. The expiration time of the backup, with microseconds
          * granularity that must be at least 6 hours and at most 366 days
          * from the time the CreateBackup request is processed. Once the `expire_time`
    @@ -2056,7 +2686,8 @@ public Builder setExpireTime(com.google.protobuf.Timestamp value) {
          *
          *
          * 
    -     * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * operation. The expiration time of the backup, with microseconds
          * granularity that must be at least 6 hours and at most 366 days
          * from the time the CreateBackup request is processed. Once the `expire_time`
    @@ -2080,7 +2711,8 @@ public Builder setExpireTime(com.google.protobuf.Timestamp.Builder builderForVal
          *
          *
          * 
    -     * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * operation. The expiration time of the backup, with microseconds
          * granularity that must be at least 6 hours and at most 366 days
          * from the time the CreateBackup request is processed. Once the `expire_time`
    @@ -2112,7 +2744,8 @@ public Builder mergeExpireTime(com.google.protobuf.Timestamp value) {
          *
          *
          * 
    -     * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * operation. The expiration time of the backup, with microseconds
          * granularity that must be at least 6 hours and at most 366 days
          * from the time the CreateBackup request is processed. Once the `expire_time`
    @@ -2136,7 +2769,8 @@ public Builder clearExpireTime() {
          *
          *
          * 
    -     * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * operation. The expiration time of the backup, with microseconds
          * granularity that must be at least 6 hours and at most 366 days
          * from the time the CreateBackup request is processed. Once the `expire_time`
    @@ -2155,7 +2789,8 @@ public com.google.protobuf.Timestamp.Builder getExpireTimeBuilder() {
          *
          *
          * 
    -     * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * operation. The expiration time of the backup, with microseconds
          * granularity that must be at least 6 hours and at most 366 days
          * from the time the CreateBackup request is processed. Once the `expire_time`
    @@ -2178,7 +2813,8 @@ public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() {
          *
          *
          * 
    -     * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Required for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * operation. The expiration time of the backup, with microseconds
          * granularity that must be at least 6 hours and at most 366 days
          * from the time the CreateBackup request is processed. Once the `expire_time`
    @@ -2210,8 +2846,11 @@ public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() {
          *
          *
          * 
    -     * Output only for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -     * Required for the [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] operation.
    +     * Output only for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Required for the
    +     * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +     * operation.
          *
          * A globally unique identifier for the backup which cannot be
          * changed. Values are of the form
    @@ -2244,8 +2883,11 @@ public java.lang.String getName() {
          *
          *
          * 
    -     * Output only for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -     * Required for the [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] operation.
    +     * Output only for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Required for the
    +     * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +     * operation.
          *
          * A globally unique identifier for the backup which cannot be
          * changed. Values are of the form
    @@ -2278,8 +2920,11 @@ public com.google.protobuf.ByteString getNameBytes() {
          *
          *
          * 
    -     * Output only for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -     * Required for the [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] operation.
    +     * Output only for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Required for the
    +     * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +     * operation.
          *
          * A globally unique identifier for the backup which cannot be
          * changed. Values are of the form
    @@ -2311,8 +2956,11 @@ public Builder setName(java.lang.String value) {
          *
          *
          * 
    -     * Output only for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -     * Required for the [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] operation.
    +     * Output only for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Required for the
    +     * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +     * operation.
          *
          * A globally unique identifier for the backup which cannot be
          * changed. Values are of the form
    @@ -2340,8 +2988,11 @@ public Builder clearName() {
          *
          *
          * 
    -     * Output only for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -     * Required for the [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] operation.
    +     * Output only for the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation. Required for the
    +     * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +     * operation.
          *
          * A globally unique identifier for the backup which cannot be
          * changed. Values are of the form
    @@ -2381,7 +3032,8 @@ public Builder setNameBytes(com.google.protobuf.ByteString value) {
          *
          *
          * 
    -     * Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * request is received. If the request does not specify `version_time`, the
          * `version_time` of the backup will be equivalent to the `create_time`.
          * 
    @@ -2399,7 +3051,8 @@ public boolean hasCreateTime() { * * *
    -     * Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * request is received. If the request does not specify `version_time`, the
          * `version_time` of the backup will be equivalent to the `create_time`.
          * 
    @@ -2423,7 +3076,8 @@ public com.google.protobuf.Timestamp getCreateTime() { * * *
    -     * Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * request is received. If the request does not specify `version_time`, the
          * `version_time` of the backup will be equivalent to the `create_time`.
          * 
    @@ -2449,7 +3103,8 @@ public Builder setCreateTime(com.google.protobuf.Timestamp value) { * * *
    -     * Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * request is received. If the request does not specify `version_time`, the
          * `version_time` of the backup will be equivalent to the `create_time`.
          * 
    @@ -2472,7 +3127,8 @@ public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForVal * * *
    -     * Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * request is received. If the request does not specify `version_time`, the
          * `version_time` of the backup will be equivalent to the `create_time`.
          * 
    @@ -2503,7 +3159,8 @@ public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { * * *
    -     * Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * request is received. If the request does not specify `version_time`, the
          * `version_time` of the backup will be equivalent to the `create_time`.
          * 
    @@ -2526,7 +3183,8 @@ public Builder clearCreateTime() { * * *
    -     * Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * request is received. If the request does not specify `version_time`, the
          * `version_time` of the backup will be equivalent to the `create_time`.
          * 
    @@ -2544,7 +3202,8 @@ public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { * * *
    -     * Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * request is received. If the request does not specify `version_time`, the
          * `version_time` of the backup will be equivalent to the `create_time`.
          * 
    @@ -2566,7 +3225,8 @@ public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { * * *
    -     * Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * Output only. The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
          * request is received. If the request does not specify `version_time`, the
          * `version_time` of the backup will be equivalent to the `create_time`.
          * 
    @@ -2645,6 +3305,148 @@ public Builder clearSizeBytes() { return this; } + private long freeableSizeBytes_; + /** + * + * + *
    +     * Output only. The number of bytes that will be freed by deleting this
    +     * backup. This value will be zero if, for example, this backup is part of an
    +     * incremental backup chain and younger backups in the chain require that we
    +     * keep its data. For backups not in an incremental backup chain, this is
    +     * always the size of the backup. This value may change if backups on the same
    +     * chain get created, deleted or expired.
    +     * 
    + * + * int64 freeable_size_bytes = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The freeableSizeBytes. + */ + @java.lang.Override + public long getFreeableSizeBytes() { + return freeableSizeBytes_; + } + /** + * + * + *
    +     * Output only. The number of bytes that will be freed by deleting this
    +     * backup. This value will be zero if, for example, this backup is part of an
    +     * incremental backup chain and younger backups in the chain require that we
    +     * keep its data. For backups not in an incremental backup chain, this is
    +     * always the size of the backup. This value may change if backups on the same
    +     * chain get created, deleted or expired.
    +     * 
    + * + * int64 freeable_size_bytes = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The freeableSizeBytes to set. + * @return This builder for chaining. + */ + public Builder setFreeableSizeBytes(long value) { + + freeableSizeBytes_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. The number of bytes that will be freed by deleting this
    +     * backup. This value will be zero if, for example, this backup is part of an
    +     * incremental backup chain and younger backups in the chain require that we
    +     * keep its data. For backups not in an incremental backup chain, this is
    +     * always the size of the backup. This value may change if backups on the same
    +     * chain get created, deleted or expired.
    +     * 
    + * + * int64 freeable_size_bytes = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearFreeableSizeBytes() { + bitField0_ = (bitField0_ & ~0x00000040); + freeableSizeBytes_ = 0L; + onChanged(); + return this; + } + + private long exclusiveSizeBytes_; + /** + * + * + *
    +     * Output only. For a backup in an incremental backup chain, this is the
    +     * storage space needed to keep the data that has changed since the previous
    +     * backup. For all other backups, this is always the size of the backup. This
    +     * value may change if backups on the same chain get deleted or expired.
    +     *
    +     * This field can be used to calculate the total storage space used by a set
    +     * of backups. For example, the total space used by all backups of a database
    +     * can be computed by summing up this field.
    +     * 
    + * + * int64 exclusive_size_bytes = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The exclusiveSizeBytes. + */ + @java.lang.Override + public long getExclusiveSizeBytes() { + return exclusiveSizeBytes_; + } + /** + * + * + *
    +     * Output only. For a backup in an incremental backup chain, this is the
    +     * storage space needed to keep the data that has changed since the previous
    +     * backup. For all other backups, this is always the size of the backup. This
    +     * value may change if backups on the same chain get deleted or expired.
    +     *
    +     * This field can be used to calculate the total storage space used by a set
    +     * of backups. For example, the total space used by all backups of a database
    +     * can be computed by summing up this field.
    +     * 
    + * + * int64 exclusive_size_bytes = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The exclusiveSizeBytes to set. + * @return This builder for chaining. + */ + public Builder setExclusiveSizeBytes(long value) { + + exclusiveSizeBytes_ = value; + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. For a backup in an incremental backup chain, this is the
    +     * storage space needed to keep the data that has changed since the previous
    +     * backup. For all other backups, this is always the size of the backup. This
    +     * value may change if backups on the same chain get deleted or expired.
    +     *
    +     * This field can be used to calculate the total storage space used by a set
    +     * of backups. For example, the total space used by all backups of a database
    +     * can be computed by summing up this field.
    +     * 
    + * + * int64 exclusive_size_bytes = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearExclusiveSizeBytes() { + bitField0_ = (bitField0_ & ~0x00000080); + exclusiveSizeBytes_ = 0L; + onChanged(); + return this; + } + private int state_ = 0; /** * @@ -2679,7 +3481,7 @@ public int getStateValue() { */ public Builder setStateValue(int value) { state_ = value; - bitField0_ |= 0x00000040; + bitField0_ |= 0x00000100; onChanged(); return this; } @@ -2722,7 +3524,7 @@ public Builder setState(com.google.spanner.admin.database.v1.Backup.State value) if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000040; + bitField0_ |= 0x00000100; state_ = value.getNumber(); onChanged(); return this; @@ -2741,7 +3543,7 @@ public Builder setState(com.google.spanner.admin.database.v1.Backup.State value) * @return This builder for chaining. */ public Builder clearState() { - bitField0_ = (bitField0_ & ~0x00000040); + bitField0_ = (bitField0_ & ~0x00000100); state_ = 0; onChanged(); return this; @@ -2754,7 +3556,7 @@ private void ensureReferencingDatabasesIsMutable() { if (!referencingDatabases_.isModifiable()) { referencingDatabases_ = new com.google.protobuf.LazyStringArrayList(referencingDatabases_); } - bitField0_ |= 0x00000080; + bitField0_ |= 0x00000200; } /** * @@ -2874,7 +3676,7 @@ public Builder setReferencingDatabases(int index, java.lang.String value) { } ensureReferencingDatabasesIsMutable(); referencingDatabases_.set(index, value); - bitField0_ |= 0x00000080; + bitField0_ |= 0x00000200; onChanged(); return this; } @@ -2904,7 +3706,7 @@ public Builder addReferencingDatabases(java.lang.String value) { } ensureReferencingDatabasesIsMutable(); referencingDatabases_.add(value); - bitField0_ |= 0x00000080; + bitField0_ |= 0x00000200; onChanged(); return this; } @@ -2931,7 +3733,7 @@ public Builder addReferencingDatabases(java.lang.String value) { public Builder 
addAllReferencingDatabases(java.lang.Iterable values) { ensureReferencingDatabasesIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll(values, referencingDatabases_); - bitField0_ |= 0x00000080; + bitField0_ |= 0x00000200; onChanged(); return this; } @@ -2956,7 +3758,7 @@ public Builder addAllReferencingDatabases(java.lang.Iterable v */ public Builder clearReferencingDatabases() { referencingDatabases_ = com.google.protobuf.LazyStringArrayList.emptyList(); - bitField0_ = (bitField0_ & ~0x00000080); + bitField0_ = (bitField0_ & ~0x00000200); ; onChanged(); return this; @@ -2988,7 +3790,7 @@ public Builder addReferencingDatabasesBytes(com.google.protobuf.ByteString value checkByteStringIsUtf8(value); ensureReferencingDatabasesIsMutable(); referencingDatabases_.add(value); - bitField0_ |= 0x00000080; + bitField0_ |= 0x00000200; onChanged(); return this; } @@ -3013,7 +3815,7 @@ public Builder addReferencingDatabasesBytes(com.google.protobuf.ByteString value * @return Whether the encryptionInfo field is set. 
*/ public boolean hasEncryptionInfo() { - return ((bitField0_ & 0x00000100) != 0); + return ((bitField0_ & 0x00000400) != 0); } /** * @@ -3057,7 +3859,7 @@ public Builder setEncryptionInfo(com.google.spanner.admin.database.v1.Encryption } else { encryptionInfoBuilder_.setMessage(value); } - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000400; onChanged(); return this; } @@ -3079,7 +3881,7 @@ public Builder setEncryptionInfo( } else { encryptionInfoBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000400; onChanged(); return this; } @@ -3096,7 +3898,7 @@ public Builder setEncryptionInfo( */ public Builder mergeEncryptionInfo(com.google.spanner.admin.database.v1.EncryptionInfo value) { if (encryptionInfoBuilder_ == null) { - if (((bitField0_ & 0x00000100) != 0) + if (((bitField0_ & 0x00000400) != 0) && encryptionInfo_ != null && encryptionInfo_ != com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance()) { @@ -3108,7 +3910,7 @@ public Builder mergeEncryptionInfo(com.google.spanner.admin.database.v1.Encrypti encryptionInfoBuilder_.mergeFrom(value); } if (encryptionInfo_ != null) { - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000400; onChanged(); } return this; @@ -3125,7 +3927,7 @@ public Builder mergeEncryptionInfo(com.google.spanner.admin.database.v1.Encrypti * */ public Builder clearEncryptionInfo() { - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000400); encryptionInfo_ = null; if (encryptionInfoBuilder_ != null) { encryptionInfoBuilder_.dispose(); @@ -3146,7 +3948,7 @@ public Builder clearEncryptionInfo() { * */ public com.google.spanner.admin.database.v1.EncryptionInfo.Builder getEncryptionInfoBuilder() { - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000400; onChanged(); return getEncryptionInfoFieldBuilder().getBuilder(); } @@ -3199,166 +4001,816 @@ public com.google.spanner.admin.database.v1.EncryptionInfo.Builder getEncryption return encryptionInfoBuilder_; } - 
private int databaseDialect_ = 0; + private java.util.List + encryptionInformation_ = java.util.Collections.emptyList(); + + private void ensureEncryptionInformationIsMutable() { + if (!((bitField0_ & 0x00000800) != 0)) { + encryptionInformation_ = + new java.util.ArrayList( + encryptionInformation_); + bitField0_ |= 0x00000800; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.spanner.admin.database.v1.EncryptionInfo, + com.google.spanner.admin.database.v1.EncryptionInfo.Builder, + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder> + encryptionInformationBuilder_; + /** * * *
    -     * Output only. The database dialect information for the backup.
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
          * 
    * * - * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; * - * - * @return The enum numeric value on the wire for databaseDialect. */ - @java.lang.Override - public int getDatabaseDialectValue() { - return databaseDialect_; + public java.util.List + getEncryptionInformationList() { + if (encryptionInformationBuilder_ == null) { + return java.util.Collections.unmodifiableList(encryptionInformation_); + } else { + return encryptionInformationBuilder_.getMessageList(); + } } /** * * *
    -     * Output only. The database dialect information for the backup.
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
          * 
    * * - * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; * - * - * @param value The enum numeric value on the wire for databaseDialect to set. - * @return This builder for chaining. */ - public Builder setDatabaseDialectValue(int value) { - databaseDialect_ = value; - bitField0_ |= 0x00000200; - onChanged(); - return this; + public int getEncryptionInformationCount() { + if (encryptionInformationBuilder_ == null) { + return encryptionInformation_.size(); + } else { + return encryptionInformationBuilder_.getCount(); + } } /** * * *
    -     * Output only. The database dialect information for the backup.
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
          * 
    * * - * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; * - * - * @return The databaseDialect. */ - @java.lang.Override - public com.google.spanner.admin.database.v1.DatabaseDialect getDatabaseDialect() { - com.google.spanner.admin.database.v1.DatabaseDialect result = - com.google.spanner.admin.database.v1.DatabaseDialect.forNumber(databaseDialect_); - return result == null - ? com.google.spanner.admin.database.v1.DatabaseDialect.UNRECOGNIZED - : result; + public com.google.spanner.admin.database.v1.EncryptionInfo getEncryptionInformation(int index) { + if (encryptionInformationBuilder_ == null) { + return encryptionInformation_.get(index); + } else { + return encryptionInformationBuilder_.getMessage(index); + } } /** * * *
    -     * Output only. The database dialect information for the backup.
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
          * 
    * * - * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; * - * - * @param value The databaseDialect to set. - * @return This builder for chaining. */ - public Builder setDatabaseDialect(com.google.spanner.admin.database.v1.DatabaseDialect value) { - if (value == null) { - throw new NullPointerException(); + public Builder setEncryptionInformation( + int index, com.google.spanner.admin.database.v1.EncryptionInfo value) { + if (encryptionInformationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEncryptionInformationIsMutable(); + encryptionInformation_.set(index, value); + onChanged(); + } else { + encryptionInformationBuilder_.setMessage(index, value); } - bitField0_ |= 0x00000200; - databaseDialect_ = value.getNumber(); - onChanged(); return this; } /** * * *
    -     * Output only. The database dialect information for the backup.
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
          * 
    * * - * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; * - * - * @return This builder for chaining. */ - public Builder clearDatabaseDialect() { - bitField0_ = (bitField0_ & ~0x00000200); - databaseDialect_ = 0; - onChanged(); - return this; - } - - private com.google.protobuf.LazyStringArrayList referencingBackups_ = - com.google.protobuf.LazyStringArrayList.emptyList(); - - private void ensureReferencingBackupsIsMutable() { - if (!referencingBackups_.isModifiable()) { - referencingBackups_ = new com.google.protobuf.LazyStringArrayList(referencingBackups_); + public Builder setEncryptionInformation( + int index, com.google.spanner.admin.database.v1.EncryptionInfo.Builder builderForValue) { + if (encryptionInformationBuilder_ == null) { + ensureEncryptionInformationIsMutable(); + encryptionInformation_.set(index, builderForValue.build()); + onChanged(); + } else { + encryptionInformationBuilder_.setMessage(index, builderForValue.build()); } - bitField0_ |= 0x00000400; + return this; } /** * * *
    -     * Output only. The names of the destination backups being created by copying
    -     * this source backup. The backup names are of the form
    -     * `projects/<project>/instances/<instance>/backups/<backup>`.
    -     * Referencing backups may exist in different instances. The existence of
    -     * any referencing backup prevents the backup from being deleted. When the
    -     * copy operation is done (either successfully completed or cancelled or the
    -     * destination backup is deleted), the reference to the backup is removed.
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
          * 
    * * - * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; * - * - * @return A list containing the referencingBackups. */ - public com.google.protobuf.ProtocolStringList getReferencingBackupsList() { - referencingBackups_.makeImmutable(); - return referencingBackups_; + public Builder addEncryptionInformation( + com.google.spanner.admin.database.v1.EncryptionInfo value) { + if (encryptionInformationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureEncryptionInformationIsMutable(); + encryptionInformation_.add(value); + onChanged(); + } else { + encryptionInformationBuilder_.addMessage(value); + } + return this; } /** * * *
    -     * Output only. The names of the destination backups being created by copying
    -     * this source backup. The backup names are of the form
    -     * `projects/<project>/instances/<instance>/backups/<backup>`.
    -     * Referencing backups may exist in different instances. The existence of
    -     * any referencing backup prevents the backup from being deleted. When the
    -     * copy operation is done (either successfully completed or cancelled or the
    -     * destination backup is deleted), the reference to the backup is removed.
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
          * 
    * * - * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; * - * - * @return The count of referencingBackups. */ - public int getReferencingBackupsCount() { - return referencingBackups_.size(); - } - /** - * - * - *
    +    public Builder addEncryptionInformation(
    +        int index, com.google.spanner.admin.database.v1.EncryptionInfo value) {
    +      if (encryptionInformationBuilder_ == null) {
    +        if (value == null) {
    +          throw new NullPointerException();
    +        }
    +        ensureEncryptionInformationIsMutable();
    +        encryptionInformation_.add(index, value);
    +        onChanged();
    +      } else {
    +        encryptionInformationBuilder_.addMessage(index, value);
    +      }
    +      return this;
    +    }
    +    /**
    +     *
    +     *
    +     * 
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addEncryptionInformation( + com.google.spanner.admin.database.v1.EncryptionInfo.Builder builderForValue) { + if (encryptionInformationBuilder_ == null) { + ensureEncryptionInformationIsMutable(); + encryptionInformation_.add(builderForValue.build()); + onChanged(); + } else { + encryptionInformationBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addEncryptionInformation( + int index, com.google.spanner.admin.database.v1.EncryptionInfo.Builder builderForValue) { + if (encryptionInformationBuilder_ == null) { + ensureEncryptionInformationIsMutable(); + encryptionInformation_.add(index, builderForValue.build()); + onChanged(); + } else { + encryptionInformationBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllEncryptionInformation( + java.lang.Iterable values) { + if (encryptionInformationBuilder_ == null) { + ensureEncryptionInformationIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, encryptionInformation_); + onChanged(); + } else { + encryptionInformationBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearEncryptionInformation() { + if (encryptionInformationBuilder_ == null) { + encryptionInformation_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000800); + onChanged(); + } else { + encryptionInformationBuilder_.clear(); + } + return this; + } + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeEncryptionInformation(int index) { + if (encryptionInformationBuilder_ == null) { + ensureEncryptionInformationIsMutable(); + encryptionInformation_.remove(index); + onChanged(); + } else { + encryptionInformationBuilder_.remove(index); + } + return this; + } + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfo.Builder + getEncryptionInformationBuilder(int index) { + return getEncryptionInformationFieldBuilder().getBuilder(index); + } + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder + getEncryptionInformationOrBuilder(int index) { + if (encryptionInformationBuilder_ == null) { + return encryptionInformation_.get(index); + } else { + return encryptionInformationBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getEncryptionInformationOrBuilderList() { + if (encryptionInformationBuilder_ != null) { + return encryptionInformationBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(encryptionInformation_); + } + } + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfo.Builder + addEncryptionInformationBuilder() { + return getEncryptionInformationFieldBuilder() + .addBuilder(com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance()); + } + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.database.v1.EncryptionInfo.Builder + addEncryptionInformationBuilder(int index) { + return getEncryptionInformationFieldBuilder() + .addBuilder( + index, com.google.spanner.admin.database.v1.EncryptionInfo.getDefaultInstance()); + } + /** + * + * + *
    +     * Output only. The encryption information for the backup, whether it is
    +     * protected by one or more KMS keys. The information includes all Cloud
    +     * KMS key versions used to encrypt the backup. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated. At least one of the key
    +     * versions must be available for the backup to be restored. If a key version
    +     * is revoked in the middle of a restore, the restore behavior is undefined.
    +     * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getEncryptionInformationBuilderList() { + return getEncryptionInformationFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.spanner.admin.database.v1.EncryptionInfo, + com.google.spanner.admin.database.v1.EncryptionInfo.Builder, + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder> + getEncryptionInformationFieldBuilder() { + if (encryptionInformationBuilder_ == null) { + encryptionInformationBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.spanner.admin.database.v1.EncryptionInfo, + com.google.spanner.admin.database.v1.EncryptionInfo.Builder, + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder>( + encryptionInformation_, + ((bitField0_ & 0x00000800) != 0), + getParentForChildren(), + isClean()); + encryptionInformation_ = null; + } + return encryptionInformationBuilder_; + } + + private int databaseDialect_ = 0; + /** + * + * + *
    +     * Output only. The database dialect information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The enum numeric value on the wire for databaseDialect. + */ + @java.lang.Override + public int getDatabaseDialectValue() { + return databaseDialect_; + } + /** + * + * + *
    +     * Output only. The database dialect information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The enum numeric value on the wire for databaseDialect to set. + * @return This builder for chaining. + */ + public Builder setDatabaseDialectValue(int value) { + databaseDialect_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. The database dialect information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The databaseDialect. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.DatabaseDialect getDatabaseDialect() { + com.google.spanner.admin.database.v1.DatabaseDialect result = + com.google.spanner.admin.database.v1.DatabaseDialect.forNumber(databaseDialect_); + return result == null + ? com.google.spanner.admin.database.v1.DatabaseDialect.UNRECOGNIZED + : result; + } + /** + * + * + *
    +     * Output only. The database dialect information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The databaseDialect to set. + * @return This builder for chaining. + */ + public Builder setDatabaseDialect(com.google.spanner.admin.database.v1.DatabaseDialect value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00001000; + databaseDialect_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. The database dialect information for the backup.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.DatabaseDialect database_dialect = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearDatabaseDialect() { + bitField0_ = (bitField0_ & ~0x00001000); + databaseDialect_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringArrayList referencingBackups_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureReferencingBackupsIsMutable() { + if (!referencingBackups_.isModifiable()) { + referencingBackups_ = new com.google.protobuf.LazyStringArrayList(referencingBackups_); + } + bitField0_ |= 0x00002000; + } + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the referencingBackups. + */ + public com.google.protobuf.ProtocolStringList getReferencingBackupsList() { + referencingBackups_.makeImmutable(); + return referencingBackups_; + } + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The count of referencingBackups. + */ + public int getReferencingBackupsCount() { + return referencingBackups_.size(); + } + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The referencingBackups at the given index. + */ + public java.lang.String getReferencingBackups(int index) { + return referencingBackups_.get(index); + } + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the referencingBackups at the given index. + */ + public com.google.protobuf.ByteString getReferencingBackupsBytes(int index) { + return referencingBackups_.getByteString(index); + } + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index to set the value at. + * @param value The referencingBackups to set. + * @return This builder for chaining. + */ + public Builder setReferencingBackups(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureReferencingBackupsIsMutable(); + referencingBackups_.set(index, value); + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The referencingBackups to add. + * @return This builder for chaining. + */ + public Builder addReferencingBackups(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureReferencingBackupsIsMutable(); + referencingBackups_.add(value); + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param values The referencingBackups to add. + * @return This builder for chaining. + */ + public Builder addAllReferencingBackups(java.lang.Iterable values) { + ensureReferencingBackupsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, referencingBackups_); + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. The names of the destination backups being created by copying
    +     * this source backup. The backup names are of the form
    +     * `projects/<project>/instances/<instance>/backups/<backup>`.
    +     * Referencing backups may exist in different instances. The existence of
    +     * any referencing backup prevents the backup from being deleted. When the
    +     * copy operation is done (either successfully completed or cancelled or the
    +     * destination backup is deleted), the reference to the backup is removed.
    +     * 
    + * + * + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearReferencingBackups() { + referencingBackups_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00002000); + ; + onChanged(); + return this; + } + /** + * + * + *
          * Output only. The names of the destination backups being created by copying
          * this source backup. The backup names are of the form
          * `projects/<project>/instances/<instance>/backups/<backup>`.
    @@ -3369,66 +4821,401 @@ public int getReferencingBackupsCount() {
          * 
    * * - * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes of the referencingBackups to add. + * @return This builder for chaining. + */ + public Builder addReferencingBackupsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureReferencingBackupsIsMutable(); + referencingBackups_.add(value); + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp maxExpireTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + maxExpireTimeBuilder_; + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; * * - * @param index The index of the element to return. - * @return The referencingBackups at the given index. + * @return Whether the maxExpireTime field is set. */ - public java.lang.String getReferencingBackups(int index) { - return referencingBackups_.get(index); + public boolean hasMaxExpireTime() { + return ((bitField0_ & 0x00004000) != 0); + } + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The maxExpireTime. + */ + public com.google.protobuf.Timestamp getMaxExpireTime() { + if (maxExpireTimeBuilder_ == null) { + return maxExpireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : maxExpireTime_; + } else { + return maxExpireTimeBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setMaxExpireTime(com.google.protobuf.Timestamp value) { + if (maxExpireTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + maxExpireTime_ = value; + } else { + maxExpireTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00004000; + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setMaxExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (maxExpireTimeBuilder_ == null) { + maxExpireTime_ = builderForValue.build(); + } else { + maxExpireTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00004000; + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeMaxExpireTime(com.google.protobuf.Timestamp value) { + if (maxExpireTimeBuilder_ == null) { + if (((bitField0_ & 0x00004000) != 0) + && maxExpireTime_ != null + && maxExpireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getMaxExpireTimeBuilder().mergeFrom(value); + } else { + maxExpireTime_ = value; + } + } else { + maxExpireTimeBuilder_.mergeFrom(value); + } + if (maxExpireTime_ != null) { + bitField0_ |= 0x00004000; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearMaxExpireTime() { + bitField0_ = (bitField0_ & ~0x00004000); + maxExpireTime_ = null; + if (maxExpireTimeBuilder_ != null) { + maxExpireTimeBuilder_.dispose(); + maxExpireTimeBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getMaxExpireTimeBuilder() { + bitField0_ |= 0x00004000; + onChanged(); + return getMaxExpireTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getMaxExpireTimeOrBuilder() { + if (maxExpireTimeBuilder_ != null) { + return maxExpireTimeBuilder_.getMessageOrBuilder(); + } else { + return maxExpireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : maxExpireTime_; + } + } + /** + * + * + *
    +     * Output only. The max allowed expiration time of the backup, with
    +     * microseconds granularity. A backup's expiration time can be configured in
    +     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    +     * copying an existing backup, the expiration time specified must be
    +     * less than `Backup.max_expire_time`.
    +     * 
    + * + * + * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getMaxExpireTimeFieldBuilder() { + if (maxExpireTimeBuilder_ == null) { + maxExpireTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getMaxExpireTime(), getParentForChildren(), isClean()); + maxExpireTime_ = null; + } + return maxExpireTimeBuilder_; + } + + private com.google.protobuf.LazyStringArrayList backupSchedules_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureBackupSchedulesIsMutable() { + if (!backupSchedules_.isModifiable()) { + backupSchedules_ = new com.google.protobuf.LazyStringArrayList(backupSchedules_); + } + bitField0_ |= 0x00008000; + } + /** + * + * + *
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
    +     * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the backupSchedules. + */ + public com.google.protobuf.ProtocolStringList getBackupSchedulesList() { + backupSchedules_.makeImmutable(); + return backupSchedules_; + } + /** + * + * + *
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
    +     * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The count of backupSchedules. + */ + public int getBackupSchedulesCount() { + return backupSchedules_.size(); + } + /** + * + * + *
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
    +     * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The backupSchedules at the given index. + */ + public java.lang.String getBackupSchedules(int index) { + return backupSchedules_.get(index); } /** * * *
    -     * Output only. The names of the destination backups being created by copying
    -     * this source backup. The backup names are of the form
    -     * `projects/<project>/instances/<instance>/backups/<backup>`.
    -     * Referencing backups may exist in different instances. The existence of
    -     * any referencing backup prevents the backup from being deleted. When the
    -     * copy operation is done (either successfully completed or cancelled or the
    -     * destination backup is deleted), the reference to the backup is removed.
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
          * 
    * * - * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } * * * @param index The index of the value to return. - * @return The bytes of the referencingBackups at the given index. + * @return The bytes of the backupSchedules at the given index. */ - public com.google.protobuf.ByteString getReferencingBackupsBytes(int index) { - return referencingBackups_.getByteString(index); + public com.google.protobuf.ByteString getBackupSchedulesBytes(int index) { + return backupSchedules_.getByteString(index); } /** * * *
    -     * Output only. The names of the destination backups being created by copying
    -     * this source backup. The backup names are of the form
    -     * `projects/<project>/instances/<instance>/backups/<backup>`.
    -     * Referencing backups may exist in different instances. The existence of
    -     * any referencing backup prevents the backup from being deleted. When the
    -     * copy operation is done (either successfully completed or cancelled or the
    -     * destination backup is deleted), the reference to the backup is removed.
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
          * 
    * * - * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } * * * @param index The index to set the value at. - * @param value The referencingBackups to set. + * @param value The backupSchedules to set. * @return This builder for chaining. */ - public Builder setReferencingBackups(int index, java.lang.String value) { + public Builder setBackupSchedules(int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } - ensureReferencingBackupsIsMutable(); - referencingBackups_.set(index, value); - bitField0_ |= 0x00000400; + ensureBackupSchedulesIsMutable(); + backupSchedules_.set(index, value); + bitField0_ |= 0x00008000; onChanged(); return this; } @@ -3436,29 +5223,31 @@ public Builder setReferencingBackups(int index, java.lang.String value) { * * *
    -     * Output only. The names of the destination backups being created by copying
    -     * this source backup. The backup names are of the form
    -     * `projects/<project>/instances/<instance>/backups/<backup>`.
    -     * Referencing backups may exist in different instances. The existence of
    -     * any referencing backup prevents the backup from being deleted. When the
    -     * copy operation is done (either successfully completed or cancelled or the
    -     * destination backup is deleted), the reference to the backup is removed.
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
          * 
    * * - * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } * * - * @param value The referencingBackups to add. + * @param value The backupSchedules to add. * @return This builder for chaining. */ - public Builder addReferencingBackups(java.lang.String value) { + public Builder addBackupSchedules(java.lang.String value) { if (value == null) { throw new NullPointerException(); } - ensureReferencingBackupsIsMutable(); - referencingBackups_.add(value); - bitField0_ |= 0x00000400; + ensureBackupSchedulesIsMutable(); + backupSchedules_.add(value); + bitField0_ |= 0x00008000; onChanged(); return this; } @@ -3466,26 +5255,28 @@ public Builder addReferencingBackups(java.lang.String value) { * * *
    -     * Output only. The names of the destination backups being created by copying
    -     * this source backup. The backup names are of the form
    -     * `projects/<project>/instances/<instance>/backups/<backup>`.
    -     * Referencing backups may exist in different instances. The existence of
    -     * any referencing backup prevents the backup from being deleted. When the
    -     * copy operation is done (either successfully completed or cancelled or the
    -     * destination backup is deleted), the reference to the backup is removed.
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
          * 
    * * - * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } * * - * @param values The referencingBackups to add. + * @param values The backupSchedules to add. * @return This builder for chaining. */ - public Builder addAllReferencingBackups(java.lang.Iterable values) { - ensureReferencingBackupsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll(values, referencingBackups_); - bitField0_ |= 0x00000400; + public Builder addAllBackupSchedules(java.lang.Iterable values) { + ensureBackupSchedulesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, backupSchedules_); + bitField0_ |= 0x00008000; onChanged(); return this; } @@ -3493,24 +5284,26 @@ public Builder addAllReferencingBackups(java.lang.Iterable val * * *
    -     * Output only. The names of the destination backups being created by copying
    -     * this source backup. The backup names are of the form
    -     * `projects/<project>/instances/<instance>/backups/<backup>`.
    -     * Referencing backups may exist in different instances. The existence of
    -     * any referencing backup prevents the backup from being deleted. When the
    -     * copy operation is done (either successfully completed or cancelled or the
    -     * destination backup is deleted), the reference to the backup is removed.
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
          * 
    * * - * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } * * * @return This builder for chaining. */ - public Builder clearReferencingBackups() { - referencingBackups_ = com.google.protobuf.LazyStringArrayList.emptyList(); - bitField0_ = (bitField0_ & ~0x00000400); + public Builder clearBackupSchedules() { + backupSchedules_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00008000); ; onChanged(); return this; @@ -3519,111 +5312,247 @@ public Builder clearReferencingBackups() { * * *
    -     * Output only. The names of the destination backups being created by copying
    -     * this source backup. The backup names are of the form
    -     * `projects/<project>/instances/<instance>/backups/<backup>`.
    -     * Referencing backups may exist in different instances. The existence of
    -     * any referencing backup prevents the backup from being deleted. When the
    -     * copy operation is done (either successfully completed or cancelled or the
    -     * destination backup is deleted), the reference to the backup is removed.
    +     * Output only. List of backup schedule URIs that are associated with
    +     * creating this backup. This is only applicable for scheduled backups, and
    +     * is empty for on-demand backups.
    +     *
    +     * To optimize for storage, whenever possible, multiple schedules are
    +     * collapsed together to create one backup. In such cases, this field captures
    +     * the list of all backup schedule URIs that are associated with creating
    +     * this backup. If collapsing is not done, then this field captures the
    +     * single backup schedule URI associated with creating this backup.
          * 
    * * - * repeated string referencing_backups = 11 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } * * - * @param value The bytes of the referencingBackups to add. + * @param value The bytes of the backupSchedules to add. * @return This builder for chaining. */ - public Builder addReferencingBackupsBytes(com.google.protobuf.ByteString value) { + public Builder addBackupSchedulesBytes(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); - ensureReferencingBackupsIsMutable(); - referencingBackups_.add(value); - bitField0_ |= 0x00000400; + ensureBackupSchedulesIsMutable(); + backupSchedules_.add(value); + bitField0_ |= 0x00008000; onChanged(); return this; } - private com.google.protobuf.Timestamp maxExpireTime_; + private java.lang.Object incrementalBackupChainId_ = ""; + /** + * + * + *
    +     * Output only. Populated only for backups in an incremental backup chain.
    +     * Backups share the same chain id if and only if they belong to the same
    +     * incremental backup chain. Use this field to determine which backups are
    +     * part of the same incremental backup chain. The ordering of backups in the
    +     * chain can be determined by ordering the backup `version_time`.
    +     * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The incrementalBackupChainId. + */ + public java.lang.String getIncrementalBackupChainId() { + java.lang.Object ref = incrementalBackupChainId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + incrementalBackupChainId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * Output only. Populated only for backups in an incremental backup chain.
    +     * Backups share the same chain id if and only if they belong to the same
    +     * incremental backup chain. Use this field to determine which backups are
    +     * part of the same incremental backup chain. The ordering of backups in the
    +     * chain can be determined by ordering the backup `version_time`.
    +     * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The bytes for incrementalBackupChainId. + */ + public com.google.protobuf.ByteString getIncrementalBackupChainIdBytes() { + java.lang.Object ref = incrementalBackupChainId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + incrementalBackupChainId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * Output only. Populated only for backups in an incremental backup chain.
    +     * Backups share the same chain id if and only if they belong to the same
    +     * incremental backup chain. Use this field to determine which backups are
    +     * part of the same incremental backup chain. The ordering of backups in the
    +     * chain can be determined by ordering the backup `version_time`.
    +     * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The incrementalBackupChainId to set. + * @return This builder for chaining. + */ + public Builder setIncrementalBackupChainId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + incrementalBackupChainId_ = value; + bitField0_ |= 0x00010000; + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. Populated only for backups in an incremental backup chain.
    +     * Backups share the same chain id if and only if they belong to the same
    +     * incremental backup chain. Use this field to determine which backups are
    +     * part of the same incremental backup chain. The ordering of backups in the
    +     * chain can be determined by ordering the backup `version_time`.
    +     * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return This builder for chaining. + */ + public Builder clearIncrementalBackupChainId() { + incrementalBackupChainId_ = getDefaultInstance().getIncrementalBackupChainId(); + bitField0_ = (bitField0_ & ~0x00010000); + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. Populated only for backups in an incremental backup chain.
    +     * Backups share the same chain id if and only if they belong to the same
    +     * incremental backup chain. Use this field to determine which backups are
    +     * part of the same incremental backup chain. The ordering of backups in the
    +     * chain can be determined by ordering the backup `version_time`.
    +     * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @param value The bytes for incrementalBackupChainId to set. + * @return This builder for chaining. + */ + public Builder setIncrementalBackupChainIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + incrementalBackupChainId_ = value; + bitField0_ |= 0x00010000; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp oldestVersionTime_; private com.google.protobuf.SingleFieldBuilderV3< com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> - maxExpireTimeBuilder_; + oldestVersionTimeBuilder_; /** * * *
    -     * Output only. The max allowed expiration time of the backup, with
    -     * microseconds granularity. A backup's expiration time can be configured in
    -     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    -     * copying an existing backup, the expiration time specified must be
    -     * less than `Backup.max_expire_time`.
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
          * 
    * * - * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; * * - * @return Whether the maxExpireTime field is set. + * @return Whether the oldestVersionTime field is set. */ - public boolean hasMaxExpireTime() { - return ((bitField0_ & 0x00000800) != 0); + public boolean hasOldestVersionTime() { + return ((bitField0_ & 0x00020000) != 0); } /** * * *
    -     * Output only. The max allowed expiration time of the backup, with
    -     * microseconds granularity. A backup's expiration time can be configured in
    -     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    -     * copying an existing backup, the expiration time specified must be
    -     * less than `Backup.max_expire_time`.
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
          * 
    * * - * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; * * - * @return The maxExpireTime. + * @return The oldestVersionTime. */ - public com.google.protobuf.Timestamp getMaxExpireTime() { - if (maxExpireTimeBuilder_ == null) { - return maxExpireTime_ == null + public com.google.protobuf.Timestamp getOldestVersionTime() { + if (oldestVersionTimeBuilder_ == null) { + return oldestVersionTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() - : maxExpireTime_; + : oldestVersionTime_; } else { - return maxExpireTimeBuilder_.getMessage(); + return oldestVersionTimeBuilder_.getMessage(); } } /** * * *
    -     * Output only. The max allowed expiration time of the backup, with
    -     * microseconds granularity. A backup's expiration time can be configured in
    -     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    -     * copying an existing backup, the expiration time specified must be
    -     * less than `Backup.max_expire_time`.
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
          * 
    * * - * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; * */ - public Builder setMaxExpireTime(com.google.protobuf.Timestamp value) { - if (maxExpireTimeBuilder_ == null) { + public Builder setOldestVersionTime(com.google.protobuf.Timestamp value) { + if (oldestVersionTimeBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - maxExpireTime_ = value; + oldestVersionTime_ = value; } else { - maxExpireTimeBuilder_.setMessage(value); + oldestVersionTimeBuilder_.setMessage(value); } - bitField0_ |= 0x00000800; + bitField0_ |= 0x00020000; onChanged(); return this; } @@ -3631,24 +5560,25 @@ public Builder setMaxExpireTime(com.google.protobuf.Timestamp value) { * * *
    -     * Output only. The max allowed expiration time of the backup, with
    -     * microseconds granularity. A backup's expiration time can be configured in
    -     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    -     * copying an existing backup, the expiration time specified must be
    -     * less than `Backup.max_expire_time`.
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
          * 
    * * - * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; * */ - public Builder setMaxExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { - if (maxExpireTimeBuilder_ == null) { - maxExpireTime_ = builderForValue.build(); + public Builder setOldestVersionTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (oldestVersionTimeBuilder_ == null) { + oldestVersionTime_ = builderForValue.build(); } else { - maxExpireTimeBuilder_.setMessage(builderForValue.build()); + oldestVersionTimeBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000800; + bitField0_ |= 0x00020000; onChanged(); return this; } @@ -3656,31 +5586,32 @@ public Builder setMaxExpireTime(com.google.protobuf.Timestamp.Builder builderFor * * *
    -     * Output only. The max allowed expiration time of the backup, with
    -     * microseconds granularity. A backup's expiration time can be configured in
    -     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    -     * copying an existing backup, the expiration time specified must be
    -     * less than `Backup.max_expire_time`.
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
          * 
    * * - * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; * */ - public Builder mergeMaxExpireTime(com.google.protobuf.Timestamp value) { - if (maxExpireTimeBuilder_ == null) { - if (((bitField0_ & 0x00000800) != 0) - && maxExpireTime_ != null - && maxExpireTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { - getMaxExpireTimeBuilder().mergeFrom(value); + public Builder mergeOldestVersionTime(com.google.protobuf.Timestamp value) { + if (oldestVersionTimeBuilder_ == null) { + if (((bitField0_ & 0x00020000) != 0) + && oldestVersionTime_ != null + && oldestVersionTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getOldestVersionTimeBuilder().mergeFrom(value); } else { - maxExpireTime_ = value; + oldestVersionTime_ = value; } } else { - maxExpireTimeBuilder_.mergeFrom(value); + oldestVersionTimeBuilder_.mergeFrom(value); } - if (maxExpireTime_ != null) { - bitField0_ |= 0x00000800; + if (oldestVersionTime_ != null) { + bitField0_ |= 0x00020000; onChanged(); } return this; @@ -3689,23 +5620,24 @@ public Builder mergeMaxExpireTime(com.google.protobuf.Timestamp value) { * * *
    -     * Output only. The max allowed expiration time of the backup, with
    -     * microseconds granularity. A backup's expiration time can be configured in
    -     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    -     * copying an existing backup, the expiration time specified must be
    -     * less than `Backup.max_expire_time`.
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
          * 
    * * - * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; * */ - public Builder clearMaxExpireTime() { - bitField0_ = (bitField0_ & ~0x00000800); - maxExpireTime_ = null; - if (maxExpireTimeBuilder_ != null) { - maxExpireTimeBuilder_.dispose(); - maxExpireTimeBuilder_ = null; + public Builder clearOldestVersionTime() { + bitField0_ = (bitField0_ & ~0x00020000); + oldestVersionTime_ = null; + if (oldestVersionTimeBuilder_ != null) { + oldestVersionTimeBuilder_.dispose(); + oldestVersionTimeBuilder_ = null; } onChanged(); return this; @@ -3714,76 +5646,79 @@ public Builder clearMaxExpireTime() { * * *
    -     * Output only. The max allowed expiration time of the backup, with
    -     * microseconds granularity. A backup's expiration time can be configured in
    -     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    -     * copying an existing backup, the expiration time specified must be
    -     * less than `Backup.max_expire_time`.
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
          * 
    * * - * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; * */ - public com.google.protobuf.Timestamp.Builder getMaxExpireTimeBuilder() { - bitField0_ |= 0x00000800; + public com.google.protobuf.Timestamp.Builder getOldestVersionTimeBuilder() { + bitField0_ |= 0x00020000; onChanged(); - return getMaxExpireTimeFieldBuilder().getBuilder(); + return getOldestVersionTimeFieldBuilder().getBuilder(); } /** * * *
    -     * Output only. The max allowed expiration time of the backup, with
    -     * microseconds granularity. A backup's expiration time can be configured in
    -     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    -     * copying an existing backup, the expiration time specified must be
    -     * less than `Backup.max_expire_time`.
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
          * 
    * * - * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; * */ - public com.google.protobuf.TimestampOrBuilder getMaxExpireTimeOrBuilder() { - if (maxExpireTimeBuilder_ != null) { - return maxExpireTimeBuilder_.getMessageOrBuilder(); + public com.google.protobuf.TimestampOrBuilder getOldestVersionTimeOrBuilder() { + if (oldestVersionTimeBuilder_ != null) { + return oldestVersionTimeBuilder_.getMessageOrBuilder(); } else { - return maxExpireTime_ == null + return oldestVersionTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() - : maxExpireTime_; + : oldestVersionTime_; } } /** * * *
    -     * Output only. The max allowed expiration time of the backup, with
    -     * microseconds granularity. A backup's expiration time can be configured in
    -     * multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
    -     * copying an existing backup, the expiration time specified must be
    -     * less than `Backup.max_expire_time`.
    +     * Output only. Data deleted at a time older than this is guaranteed not to be
    +     * retained in order to support this backup. For a backup in an incremental
    +     * backup chain, this is the version time of the oldest backup that exists or
    +     * ever existed in the chain. For all other backups, this is the version time
    +     * of the backup. This field can be used to understand what data is being
    +     * retained by the backup system.
          * 
    * * - * .google.protobuf.Timestamp max_expire_time = 12 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder> - getMaxExpireTimeFieldBuilder() { - if (maxExpireTimeBuilder_ == null) { - maxExpireTimeBuilder_ = + getOldestVersionTimeFieldBuilder() { + if (oldestVersionTimeBuilder_ == null) { + oldestVersionTimeBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< com.google.protobuf.Timestamp, com.google.protobuf.Timestamp.Builder, com.google.protobuf.TimestampOrBuilder>( - getMaxExpireTime(), getParentForChildren(), isClean()); - maxExpireTime_ = null; + getOldestVersionTime(), getParentForChildren(), isClean()); + oldestVersionTime_ = null; } - return maxExpireTimeBuilder_; + return oldestVersionTimeBuilder_; } @java.lang.Override diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInfo.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInfo.java index 75527a2c9e3..82e0f8682bc 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInfo.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInfo.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -124,9 +124,9 @@ public com.google.protobuf.ByteString getBackupBytes() { *
        * The backup contains an externally consistent copy of `source_database` at
        * the timestamp specified by `version_time`. If the
    -   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
    -   * `version_time`, the `version_time` of the backup is equivalent to the
    -   * `create_time`.
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request did not specify `version_time`, the `version_time` of the backup is
    +   * equivalent to the `create_time`.
        * 
    * * .google.protobuf.Timestamp version_time = 4; @@ -143,9 +143,9 @@ public boolean hasVersionTime() { *
        * The backup contains an externally consistent copy of `source_database` at
        * the timestamp specified by `version_time`. If the
    -   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
    -   * `version_time`, the `version_time` of the backup is equivalent to the
    -   * `create_time`.
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request did not specify `version_time`, the `version_time` of the backup is
    +   * equivalent to the `create_time`.
        * 
    * * .google.protobuf.Timestamp version_time = 4; @@ -162,9 +162,9 @@ public com.google.protobuf.Timestamp getVersionTime() { *
        * The backup contains an externally consistent copy of `source_database` at
        * the timestamp specified by `version_time`. If the
    -   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
    -   * `version_time`, the `version_time` of the backup is equivalent to the
    -   * `create_time`.
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request did not specify `version_time`, the `version_time` of the backup is
    +   * equivalent to the `create_time`.
        * 
    * * .google.protobuf.Timestamp version_time = 4; @@ -180,8 +180,9 @@ public com.google.protobuf.TimestampOrBuilder getVersionTimeOrBuilder() { * * *
    -   * The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
    -   * received.
    +   * The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request was received.
        * 
    * * .google.protobuf.Timestamp create_time = 2; @@ -196,8 +197,9 @@ public boolean hasCreateTime() { * * *
    -   * The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
    -   * received.
    +   * The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request was received.
        * 
    * * .google.protobuf.Timestamp create_time = 2; @@ -212,8 +214,9 @@ public com.google.protobuf.Timestamp getCreateTime() { * * *
    -   * The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
    -   * received.
    +   * The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request was received.
        * 
    * * .google.protobuf.Timestamp create_time = 2; @@ -835,9 +838,9 @@ public Builder setBackupBytes(com.google.protobuf.ByteString value) { *
          * The backup contains an externally consistent copy of `source_database` at
          * the timestamp specified by `version_time`. If the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
    -     * `version_time`, the `version_time` of the backup is equivalent to the
    -     * `create_time`.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
          * 
    * * .google.protobuf.Timestamp version_time = 4; @@ -853,9 +856,9 @@ public boolean hasVersionTime() { *
          * The backup contains an externally consistent copy of `source_database` at
          * the timestamp specified by `version_time`. If the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
    -     * `version_time`, the `version_time` of the backup is equivalent to the
    -     * `create_time`.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
          * 
    * * .google.protobuf.Timestamp version_time = 4; @@ -877,9 +880,9 @@ public com.google.protobuf.Timestamp getVersionTime() { *
          * The backup contains an externally consistent copy of `source_database` at
          * the timestamp specified by `version_time`. If the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
    -     * `version_time`, the `version_time` of the backup is equivalent to the
    -     * `create_time`.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
          * 
    * * .google.protobuf.Timestamp version_time = 4; @@ -903,9 +906,9 @@ public Builder setVersionTime(com.google.protobuf.Timestamp value) { *
          * The backup contains an externally consistent copy of `source_database` at
          * the timestamp specified by `version_time`. If the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
    -     * `version_time`, the `version_time` of the backup is equivalent to the
    -     * `create_time`.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
          * 
    * * .google.protobuf.Timestamp version_time = 4; @@ -926,9 +929,9 @@ public Builder setVersionTime(com.google.protobuf.Timestamp.Builder builderForVa *
          * The backup contains an externally consistent copy of `source_database` at
          * the timestamp specified by `version_time`. If the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
    -     * `version_time`, the `version_time` of the backup is equivalent to the
    -     * `create_time`.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
          * 
    * * .google.protobuf.Timestamp version_time = 4; @@ -957,9 +960,9 @@ public Builder mergeVersionTime(com.google.protobuf.Timestamp value) { *
          * The backup contains an externally consistent copy of `source_database` at
          * the timestamp specified by `version_time`. If the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
    -     * `version_time`, the `version_time` of the backup is equivalent to the
    -     * `create_time`.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
          * 
    * * .google.protobuf.Timestamp version_time = 4; @@ -980,9 +983,9 @@ public Builder clearVersionTime() { *
          * The backup contains an externally consistent copy of `source_database` at
          * the timestamp specified by `version_time`. If the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
    -     * `version_time`, the `version_time` of the backup is equivalent to the
    -     * `create_time`.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
          * 
    * * .google.protobuf.Timestamp version_time = 4; @@ -998,9 +1001,9 @@ public com.google.protobuf.Timestamp.Builder getVersionTimeBuilder() { *
          * The backup contains an externally consistent copy of `source_database` at
          * the timestamp specified by `version_time`. If the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
    -     * `version_time`, the `version_time` of the backup is equivalent to the
    -     * `create_time`.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
          * 
    * * .google.protobuf.Timestamp version_time = 4; @@ -1020,9 +1023,9 @@ public com.google.protobuf.TimestampOrBuilder getVersionTimeOrBuilder() { *
          * The backup contains an externally consistent copy of `source_database` at
          * the timestamp specified by `version_time`. If the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
    -     * `version_time`, the `version_time` of the backup is equivalent to the
    -     * `create_time`.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request did not specify `version_time`, the `version_time` of the backup is
    +     * equivalent to the `create_time`.
          * 
    * * .google.protobuf.Timestamp version_time = 4; @@ -1054,8 +1057,9 @@ public com.google.protobuf.TimestampOrBuilder getVersionTimeOrBuilder() { * * *
    -     * The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
    -     * received.
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
          * 
    * * .google.protobuf.Timestamp create_time = 2; @@ -1069,8 +1073,9 @@ public boolean hasCreateTime() { * * *
    -     * The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
    -     * received.
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
          * 
    * * .google.protobuf.Timestamp create_time = 2; @@ -1090,8 +1095,9 @@ public com.google.protobuf.Timestamp getCreateTime() { * * *
    -     * The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
    -     * received.
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
          * 
    * * .google.protobuf.Timestamp create_time = 2; @@ -1113,8 +1119,9 @@ public Builder setCreateTime(com.google.protobuf.Timestamp value) { * * *
    -     * The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
    -     * received.
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
          * 
    * * .google.protobuf.Timestamp create_time = 2; @@ -1133,8 +1140,9 @@ public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForVal * * *
    -     * The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
    -     * received.
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
          * 
    * * .google.protobuf.Timestamp create_time = 2; @@ -1161,8 +1169,9 @@ public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { * * *
    -     * The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
    -     * received.
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
          * 
    * * .google.protobuf.Timestamp create_time = 2; @@ -1181,8 +1190,9 @@ public Builder clearCreateTime() { * * *
    -     * The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
    -     * received.
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
          * 
    * * .google.protobuf.Timestamp create_time = 2; @@ -1196,8 +1206,9 @@ public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { * * *
    -     * The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
    -     * received.
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
          * 
    * * .google.protobuf.Timestamp create_time = 2; @@ -1215,8 +1226,9 @@ public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { * * *
    -     * The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
    -     * received.
    +     * The time the
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * request was received.
          * 
    * * .google.protobuf.Timestamp create_time = 2; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInfoOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInfoOrBuilder.java index 5a2963f8b9f..7ec09b51162 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInfoOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupInfoOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface BackupInfoOrBuilder @@ -55,9 +55,9 @@ public interface BackupInfoOrBuilder *
        * The backup contains an externally consistent copy of `source_database` at
        * the timestamp specified by `version_time`. If the
    -   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
    -   * `version_time`, the `version_time` of the backup is equivalent to the
    -   * `create_time`.
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request did not specify `version_time`, the `version_time` of the backup is
    +   * equivalent to the `create_time`.
        * 
    * * .google.protobuf.Timestamp version_time = 4; @@ -71,9 +71,9 @@ public interface BackupInfoOrBuilder *
        * The backup contains an externally consistent copy of `source_database` at
        * the timestamp specified by `version_time`. If the
    -   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
    -   * `version_time`, the `version_time` of the backup is equivalent to the
    -   * `create_time`.
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request did not specify `version_time`, the `version_time` of the backup is
    +   * equivalent to the `create_time`.
        * 
    * * .google.protobuf.Timestamp version_time = 4; @@ -87,9 +87,9 @@ public interface BackupInfoOrBuilder *
        * The backup contains an externally consistent copy of `source_database` at
        * the timestamp specified by `version_time`. If the
    -   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify
    -   * `version_time`, the `version_time` of the backup is equivalent to the
    -   * `create_time`.
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request did not specify `version_time`, the `version_time` of the backup is
    +   * equivalent to the `create_time`.
        * 
    * * .google.protobuf.Timestamp version_time = 4; @@ -100,8 +100,9 @@ public interface BackupInfoOrBuilder * * *
    -   * The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
    -   * received.
    +   * The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request was received.
        * 
    * * .google.protobuf.Timestamp create_time = 2; @@ -113,8 +114,9 @@ public interface BackupInfoOrBuilder * * *
    -   * The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
    -   * received.
    +   * The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request was received.
        * 
    * * .google.protobuf.Timestamp create_time = 2; @@ -126,8 +128,9 @@ public interface BackupInfoOrBuilder * * *
    -   * The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was
    -   * received.
    +   * The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * request was received.
        * 
    * * .google.protobuf.Timestamp create_time = 2; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupOrBuilder.java index bd25d67c5ab..2cdc262624f 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface BackupOrBuilder @@ -28,10 +28,10 @@ public interface BackupOrBuilder * * *
    -   * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -   * Name of the database from which this backup was
    -   * created. This needs to be in the same instance as the backup.
    -   * Values are of the form
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Name of the database from which this backup was created. This
    +   * needs to be in the same instance as the backup. Values are of the form
        * `projects/<project>/instances/<instance>/databases/<database>`.
        * 
    * @@ -44,10 +44,10 @@ public interface BackupOrBuilder * * *
    -   * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -   * Name of the database from which this backup was
    -   * created. This needs to be in the same instance as the backup.
    -   * Values are of the form
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Name of the database from which this backup was created. This
    +   * needs to be in the same instance as the backup. Values are of the form
        * `projects/<project>/instances/<instance>/databases/<database>`.
        * 
    * @@ -105,7 +105,8 @@ public interface BackupOrBuilder * * *
    -   * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
        * operation. The expiration time of the backup, with microseconds
        * granularity that must be at least 6 hours and at most 366 days
        * from the time the CreateBackup request is processed. Once the `expire_time`
    @@ -122,7 +123,8 @@ public interface BackupOrBuilder
        *
        *
        * 
    -   * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
        * operation. The expiration time of the backup, with microseconds
        * granularity that must be at least 6 hours and at most 366 days
        * from the time the CreateBackup request is processed. Once the `expire_time`
    @@ -139,7 +141,8 @@ public interface BackupOrBuilder
        *
        *
        * 
    -   * Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * Required for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
        * operation. The expiration time of the backup, with microseconds
        * granularity that must be at least 6 hours and at most 366 days
        * from the time the CreateBackup request is processed. Once the `expire_time`
    @@ -155,8 +158,11 @@ public interface BackupOrBuilder
        *
        *
        * 
    -   * Output only for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -   * Required for the [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] operation.
    +   * Output only for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Required for the
    +   * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +   * operation.
        *
        * A globally unique identifier for the backup which cannot be
        * changed. Values are of the form
    @@ -179,8 +185,11 @@ public interface BackupOrBuilder
        *
        *
        * 
    -   * Output only for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    -   * Required for the [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] operation.
    +   * Output only for the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation. Required for the
    +   * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
    +   * operation.
        *
        * A globally unique identifier for the backup which cannot be
        * changed. Values are of the form
    @@ -204,7 +213,8 @@ public interface BackupOrBuilder
        *
        *
        * 
    -   * Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * Output only. The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
        * request is received. If the request does not specify `version_time`, the
        * `version_time` of the backup will be equivalent to the `create_time`.
        * 
    @@ -219,7 +229,8 @@ public interface BackupOrBuilder * * *
    -   * Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * Output only. The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
        * request is received. If the request does not specify `version_time`, the
        * `version_time` of the backup will be equivalent to the `create_time`.
        * 
    @@ -234,7 +245,8 @@ public interface BackupOrBuilder * * *
    -   * Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * Output only. The time the
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
        * request is received. If the request does not specify `version_time`, the
        * `version_time` of the backup will be equivalent to the `create_time`.
        * 
    @@ -257,6 +269,44 @@ public interface BackupOrBuilder */ long getSizeBytes(); + /** + * + * + *
    +   * Output only. The number of bytes that will be freed by deleting this
    +   * backup. This value will be zero if, for example, this backup is part of an
    +   * incremental backup chain and younger backups in the chain require that we
    +   * keep its data. For backups not in an incremental backup chain, this is
    +   * always the size of the backup. This value may change if backups on the same
    +   * chain get created, deleted or expired.
    +   * 
    + * + * int64 freeable_size_bytes = 15 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The freeableSizeBytes. + */ + long getFreeableSizeBytes(); + + /** + * + * + *
    +   * Output only. For a backup in an incremental backup chain, this is the
    +   * storage space needed to keep the data that has changed since the previous
    +   * backup. For all other backups, this is always the size of the backup. This
    +   * value may change if backups on the same chain get deleted or expired.
    +   *
    +   * This field can be used to calculate the total storage space used by a set
    +   * of backups. For example, the total space used by all backups of a database
    +   * can be computed by summing up this field.
    +   * 
    + * + * int64 exclusive_size_bytes = 16 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The exclusiveSizeBytes. + */ + long getExclusiveSizeBytes(); + /** * * @@ -410,6 +460,95 @@ public interface BackupOrBuilder */ com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptionInfoOrBuilder(); + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getEncryptionInformationList(); + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.database.v1.EncryptionInfo getEncryptionInformation(int index); + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getEncryptionInformationCount(); + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getEncryptionInformationOrBuilderList(); + /** + * + * + *
    +   * Output only. The encryption information for the backup, whether it is
    +   * protected by one or more KMS keys. The information includes all Cloud
    +   * KMS key versions used to encrypt the backup. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated. At least one of the key
    +   * versions must be available for the backup to be restored. If a key version
    +   * is revoked in the middle of a restore, the restore behavior is undefined.
    +   * 
    + * + * + * repeated .google.spanner.admin.database.v1.EncryptionInfo encryption_information = 13 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptionInformationOrBuilder( + int index); + /** * * @@ -574,4 +713,186 @@ public interface BackupOrBuilder * */ com.google.protobuf.TimestampOrBuilder getMaxExpireTimeOrBuilder(); + + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the backupSchedules. + */ + java.util.List getBackupSchedulesList(); + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @return The count of backupSchedules. + */ + int getBackupSchedulesCount(); + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The backupSchedules at the given index. + */ + java.lang.String getBackupSchedules(int index); + /** + * + * + *
    +   * Output only. List of backup schedule URIs that are associated with
    +   * creating this backup. This is only applicable for scheduled backups, and
    +   * is empty for on-demand backups.
    +   *
    +   * To optimize for storage, whenever possible, multiple schedules are
    +   * collapsed together to create one backup. In such cases, this field captures
    +   * the list of all backup schedule URIs that are associated with creating
    +   * this backup. If collapsing is not done, then this field captures the
    +   * single backup schedule URI associated with creating this backup.
    +   * 
    + * + * + * repeated string backup_schedules = 14 [(.google.api.field_behavior) = OUTPUT_ONLY, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the backupSchedules at the given index. + */ + com.google.protobuf.ByteString getBackupSchedulesBytes(int index); + + /** + * + * + *
    +   * Output only. Populated only for backups in an incremental backup chain.
    +   * Backups share the same chain id if and only if they belong to the same
    +   * incremental backup chain. Use this field to determine which backups are
    +   * part of the same incremental backup chain. The ordering of backups in the
    +   * chain can be determined by ordering the backup `version_time`.
    +   * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The incrementalBackupChainId. + */ + java.lang.String getIncrementalBackupChainId(); + /** + * + * + *
    +   * Output only. Populated only for backups in an incremental backup chain.
    +   * Backups share the same chain id if and only if they belong to the same
    +   * incremental backup chain. Use this field to determine which backups are
    +   * part of the same incremental backup chain. The ordering of backups in the
    +   * chain can be determined by ordering the backup `version_time`.
    +   * 
    + * + * string incremental_backup_chain_id = 17 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The bytes for incrementalBackupChainId. + */ + com.google.protobuf.ByteString getIncrementalBackupChainIdBytes(); + + /** + * + * + *
    +   * Output only. Data deleted at a time older than this is guaranteed not to be
    +   * retained in order to support this backup. For a backup in an incremental
    +   * backup chain, this is the version time of the oldest backup that exists or
    +   * ever existed in the chain. For all other backups, this is the version time
    +   * of the backup. This field can be used to understand what data is being
    +   * retained by the backup system.
    +   * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the oldestVersionTime field is set. + */ + boolean hasOldestVersionTime(); + /** + * + * + *
    +   * Output only. Data deleted at a time older than this is guaranteed not to be
    +   * retained in order to support this backup. For a backup in an incremental
    +   * backup chain, this is the version time of the oldest backup that exists or
    +   * ever existed in the chain. For all other backups, this is the version time
    +   * of the backup. This field can be used to understand what data is being
    +   * retained by the backup system.
    +   * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The oldestVersionTime. + */ + com.google.protobuf.Timestamp getOldestVersionTime(); + /** + * + * + *
    +   * Output only. Data deleted at a time older than this is guaranteed not to be
    +   * retained in order to support this backup. For a backup in an incremental
    +   * backup chain, this is the version time of the oldest backup that exists or
    +   * ever existed in the chain. For all other backups, this is the version time
    +   * of the backup. This field can be used to understand what data is being
    +   * retained by the backup system.
    +   * 
    + * + * + * .google.protobuf.Timestamp oldest_version_time = 18 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getOldestVersionTimeOrBuilder(); } diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupProto.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupProto.java index 2901738e77a..2d095ff5420 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupProto.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupProto.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public final class BackupProto { @@ -88,6 +88,14 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_spanner_admin_database_v1_CopyBackupEncryptionConfig_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_spanner_admin_database_v1_CopyBackupEncryptionConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_FullBackupSpec_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_FullBackupSpec_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_fieldAccessorTable; public static 
com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; @@ -104,105 +112,117 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "g/operations.proto\032 google/protobuf/fiel" + "d_mask.proto\032\037google/protobuf/timestamp." + "proto\032-google/spanner/admin/database/v1/" - + "common.proto\"\303\006\n\006Backup\0226\n\010database\030\002 \001(" + + "common.proto\"\220\t\n\006Backup\0226\n\010database\030\002 \001(" + "\tB$\372A!\n\037spanner.googleapis.com/Database\022" + "0\n\014version_time\030\t \001(\0132\032.google.protobuf." + "Timestamp\022/\n\013expire_time\030\003 \001(\0132\032.google." + "protobuf.Timestamp\022\014\n\004name\030\001 \001(\t\0224\n\013crea" + "te_time\030\004 \001(\0132\032.google.protobuf.Timestam" - + "pB\003\340A\003\022\027\n\nsize_bytes\030\005 \001(\003B\003\340A\003\022B\n\005state" - + "\030\006 \001(\0162..google.spanner.admin.database.v" - + "1.Backup.StateB\003\340A\003\022F\n\025referencing_datab" - + "ases\030\007 \003(\tB\'\340A\003\372A!\n\037spanner.googleapis.c" - + "om/Database\022N\n\017encryption_info\030\010 \001(\01320.g" - + "oogle.spanner.admin.database.v1.Encrypti" - + "onInfoB\003\340A\003\022P\n\020database_dialect\030\n \001(\01621." 
- + "google.spanner.admin.database.v1.Databas" - + "eDialectB\003\340A\003\022B\n\023referencing_backups\030\013 \003" - + "(\tB%\340A\003\372A\037\n\035spanner.googleapis.com/Backu" - + "p\0228\n\017max_expire_time\030\014 \001(\0132\032.google.prot" - + "obuf.TimestampB\003\340A\003\"7\n\005State\022\025\n\021STATE_UN" - + "SPECIFIED\020\000\022\014\n\010CREATING\020\001\022\t\n\005READY\020\002:\\\352A" - + "Y\n\035spanner.googleapis.com/Backup\0228projec" - + "ts/{project}/instances/{instance}/backup" - + "s/{backup}\"\205\002\n\023CreateBackupRequest\0227\n\006pa" - + "rent\030\001 \001(\tB\'\340A\002\372A!\n\037spanner.googleapis.c" - + "om/Instance\022\026\n\tbackup_id\030\002 \001(\tB\003\340A\002\022=\n\006b" - + "ackup\030\003 \001(\0132(.google.spanner.admin.datab" - + "ase.v1.BackupB\003\340A\002\022^\n\021encryption_config\030" - + "\004 \001(\0132>.google.spanner.admin.database.v1" - + ".CreateBackupEncryptionConfigB\003\340A\001\"\370\001\n\024C" - + "reateBackupMetadata\0220\n\004name\030\001 \001(\tB\"\372A\037\n\035" - + "spanner.googleapis.com/Backup\0226\n\010databas" - + "e\030\002 \001(\tB$\372A!\n\037spanner.googleapis.com/Dat" - + "abase\022E\n\010progress\030\003 \001(\01323.google.spanner" - + ".admin.database.v1.OperationProgress\022/\n\013" - + "cancel_time\030\004 \001(\0132\032.google.protobuf.Time" - + "stamp\"\266\002\n\021CopyBackupRequest\0227\n\006parent\030\001 " + + "pB\003\340A\003\022\027\n\nsize_bytes\030\005 \001(\003B\003\340A\003\022 \n\023freea" + + "ble_size_bytes\030\017 \001(\003B\003\340A\003\022!\n\024exclusive_s" + + "ize_bytes\030\020 \001(\003B\003\340A\003\022B\n\005state\030\006 \001(\0162..go" + + "ogle.spanner.admin.database.v1.Backup.St" + + "ateB\003\340A\003\022F\n\025referencing_databases\030\007 \003(\tB" + + "\'\340A\003\372A!\n\037spanner.googleapis.com/Database" + + "\022N\n\017encryption_info\030\010 \001(\01320.google.spann" + + "er.admin.database.v1.EncryptionInfoB\003\340A\003" + + 
"\022U\n\026encryption_information\030\r \003(\01320.googl" + + "e.spanner.admin.database.v1.EncryptionIn" + + "foB\003\340A\003\022P\n\020database_dialect\030\n \001(\01621.goog" + + "le.spanner.admin.database.v1.DatabaseDia" + + "lectB\003\340A\003\022B\n\023referencing_backups\030\013 \003(\tB%" + + "\340A\003\372A\037\n\035spanner.googleapis.com/Backup\0228\n" + + "\017max_expire_time\030\014 \001(\0132\032.google.protobuf" + + ".TimestampB\003\340A\003\022G\n\020backup_schedules\030\016 \003(" + + "\tB-\340A\003\372A\'\n%spanner.googleapis.com/Backup" + + "Schedule\022(\n\033incremental_backup_chain_id\030" + + "\021 \001(\tB\003\340A\003\022<\n\023oldest_version_time\030\022 \001(\0132" + + "\032.google.protobuf.TimestampB\003\340A\003\"7\n\005Stat" + + "e\022\025\n\021STATE_UNSPECIFIED\020\000\022\014\n\010CREATING\020\001\022\t" + + "\n\005READY\020\002:\\\352AY\n\035spanner.googleapis.com/B" + + "ackup\0228projects/{project}/instances/{ins" + + "tance}/backups/{backup}\"\205\002\n\023CreateBackup" + + "Request\0227\n\006parent\030\001 \001(\tB\'\340A\002\372A!\n\037spanner" + + ".googleapis.com/Instance\022\026\n\tbackup_id\030\002 " + + "\001(\tB\003\340A\002\022=\n\006backup\030\003 \001(\0132(.google.spanne" + + "r.admin.database.v1.BackupB\003\340A\002\022^\n\021encry" + + "ption_config\030\004 \001(\0132>.google.spanner.admi" + + "n.database.v1.CreateBackupEncryptionConf" + + "igB\003\340A\001\"\370\001\n\024CreateBackupMetadata\0220\n\004name" + + "\030\001 \001(\tB\"\372A\037\n\035spanner.googleapis.com/Back" + + "up\0226\n\010database\030\002 \001(\tB$\372A!\n\037spanner.googl" + + "eapis.com/Database\022E\n\010progress\030\003 \001(\01323.g" + + "oogle.spanner.admin.database.v1.Operatio" + + "nProgress\022/\n\013cancel_time\030\004 \001(\0132\032.google." 
+ + "protobuf.Timestamp\"\266\002\n\021CopyBackupRequest" + + "\0227\n\006parent\030\001 \001(\tB\'\340A\002\372A!\n\037spanner.google" + + "apis.com/Instance\022\026\n\tbackup_id\030\002 \001(\tB\003\340A" + + "\002\022<\n\rsource_backup\030\003 \001(\tB%\340A\002\372A\037\n\035spanne" + + "r.googleapis.com/Backup\0224\n\013expire_time\030\004" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\002\022\\\n" + + "\021encryption_config\030\005 \001(\0132<.google.spanne" + + "r.admin.database.v1.CopyBackupEncryption" + + "ConfigB\003\340A\001\"\371\001\n\022CopyBackupMetadata\0220\n\004na" + + "me\030\001 \001(\tB\"\372A\037\n\035spanner.googleapis.com/Ba" + + "ckup\0229\n\rsource_backup\030\002 \001(\tB\"\372A\037\n\035spanne" + + "r.googleapis.com/Backup\022E\n\010progress\030\003 \001(" + + "\01323.google.spanner.admin.database.v1.Ope" + + "rationProgress\022/\n\013cancel_time\030\004 \001(\0132\032.go" + + "ogle.protobuf.Timestamp\"\212\001\n\023UpdateBackup" + + "Request\022=\n\006backup\030\001 \001(\0132(.google.spanner" + + ".admin.database.v1.BackupB\003\340A\002\0224\n\013update" + + "_mask\030\002 \001(\0132\032.google.protobuf.FieldMaskB" + + "\003\340A\002\"G\n\020GetBackupRequest\0223\n\004name\030\001 \001(\tB%" + + "\340A\002\372A\037\n\035spanner.googleapis.com/Backup\"J\n" + + "\023DeleteBackupRequest\0223\n\004name\030\001 \001(\tB%\340A\002\372" + + "A\037\n\035spanner.googleapis.com/Backup\"\204\001\n\022Li" + + "stBackupsRequest\0227\n\006parent\030\001 \001(\tB\'\340A\002\372A!" 
+ + "\n\037spanner.googleapis.com/Instance\022\016\n\006fil" + + "ter\030\002 \001(\t\022\021\n\tpage_size\030\003 \001(\005\022\022\n\npage_tok" + + "en\030\004 \001(\t\"i\n\023ListBackupsResponse\0229\n\007backu" + + "ps\030\001 \003(\0132(.google.spanner.admin.database" + + ".v1.Backup\022\027\n\017next_page_token\030\002 \001(\t\"\215\001\n\033" + + "ListBackupOperationsRequest\0227\n\006parent\030\001 " + "\001(\tB\'\340A\002\372A!\n\037spanner.googleapis.com/Inst" - + "ance\022\026\n\tbackup_id\030\002 \001(\tB\003\340A\002\022<\n\rsource_b" - + "ackup\030\003 \001(\tB%\340A\002\372A\037\n\035spanner.googleapis." - + "com/Backup\0224\n\013expire_time\030\004 \001(\0132\032.google" - + ".protobuf.TimestampB\003\340A\002\022\\\n\021encryption_c" - + "onfig\030\005 \001(\0132<.google.spanner.admin.datab" - + "ase.v1.CopyBackupEncryptionConfigB\003\340A\001\"\371" - + "\001\n\022CopyBackupMetadata\0220\n\004name\030\001 \001(\tB\"\372A\037" - + "\n\035spanner.googleapis.com/Backup\0229\n\rsourc" - + "e_backup\030\002 \001(\tB\"\372A\037\n\035spanner.googleapis." 
- + "com/Backup\022E\n\010progress\030\003 \001(\01323.google.sp" - + "anner.admin.database.v1.OperationProgres" - + "s\022/\n\013cancel_time\030\004 \001(\0132\032.google.protobuf" - + ".Timestamp\"\212\001\n\023UpdateBackupRequest\022=\n\006ba" - + "ckup\030\001 \001(\0132(.google.spanner.admin.databa" - + "se.v1.BackupB\003\340A\002\0224\n\013update_mask\030\002 \001(\0132\032" - + ".google.protobuf.FieldMaskB\003\340A\002\"G\n\020GetBa" - + "ckupRequest\0223\n\004name\030\001 \001(\tB%\340A\002\372A\037\n\035spann" - + "er.googleapis.com/Backup\"J\n\023DeleteBackup" - + "Request\0223\n\004name\030\001 \001(\tB%\340A\002\372A\037\n\035spanner.g" - + "oogleapis.com/Backup\"\204\001\n\022ListBackupsRequ" - + "est\0227\n\006parent\030\001 \001(\tB\'\340A\002\372A!\n\037spanner.goo" - + "gleapis.com/Instance\022\016\n\006filter\030\002 \001(\t\022\021\n\t" - + "page_size\030\003 \001(\005\022\022\n\npage_token\030\004 \001(\t\"i\n\023L" - + "istBackupsResponse\0229\n\007backups\030\001 \003(\0132(.go" - + "ogle.spanner.admin.database.v1.Backup\022\027\n" - + "\017next_page_token\030\002 \001(\t\"\215\001\n\033ListBackupOpe" - + "rationsRequest\0227\n\006parent\030\001 \001(\tB\'\340A\002\372A!\n\037" - + "spanner.googleapis.com/Instance\022\016\n\006filte" - + "r\030\002 \001(\t\022\021\n\tpage_size\030\003 \001(\005\022\022\n\npage_token" - + "\030\004 \001(\t\"j\n\034ListBackupOperationsResponse\0221" - + "\n\noperations\030\001 \003(\0132\035.google.longrunning." - + "Operation\022\027\n\017next_page_token\030\002 \001(\t\"\342\001\n\nB" - + "ackupInfo\0222\n\006backup\030\001 \001(\tB\"\372A\037\n\035spanner." - + "googleapis.com/Backup\0220\n\014version_time\030\004 " - + "\001(\0132\032.google.protobuf.Timestamp\022/\n\013creat" - + "e_time\030\002 \001(\0132\032.google.protobuf.Timestamp" - + "\022=\n\017source_database\030\003 \001(\tB$\372A!\n\037spanner." 
- + "googleapis.com/Database\"\335\002\n\034CreateBackup" - + "EncryptionConfig\022k\n\017encryption_type\030\001 \001(" - + "\0162M.google.spanner.admin.database.v1.Cre" - + "ateBackupEncryptionConfig.EncryptionType" - + "B\003\340A\002\022?\n\014kms_key_name\030\002 \001(\tB)\340A\001\372A#\n!clo" - + "udkms.googleapis.com/CryptoKey\"\216\001\n\016Encry" - + "ptionType\022\037\n\033ENCRYPTION_TYPE_UNSPECIFIED" - + "\020\000\022\033\n\027USE_DATABASE_ENCRYPTION\020\001\022\035\n\031GOOGL" - + "E_DEFAULT_ENCRYPTION\020\002\022\037\n\033CUSTOMER_MANAG" - + "ED_ENCRYPTION\020\003\"\351\002\n\032CopyBackupEncryption" - + "Config\022i\n\017encryption_type\030\001 \001(\0162K.google" - + ".spanner.admin.database.v1.CopyBackupEnc" - + "ryptionConfig.EncryptionTypeB\003\340A\002\022?\n\014kms" - + "_key_name\030\002 \001(\tB)\340A\001\372A#\n!cloudkms.google" - + "apis.com/CryptoKey\"\236\001\n\016EncryptionType\022\037\n" - + "\033ENCRYPTION_TYPE_UNSPECIFIED\020\000\022+\n\'USE_CO" - + "NFIG_DEFAULT_OR_BACKUP_ENCRYPTION\020\001\022\035\n\031G" - + "OOGLE_DEFAULT_ENCRYPTION\020\002\022\037\n\033CUSTOMER_M" - + "ANAGED_ENCRYPTION\020\003B\375\001\n$com.google.spann" - + "er.admin.database.v1B\013BackupProtoP\001ZFclo" - + "ud.google.com/go/spanner/admin/database/" - + "apiv1/databasepb;databasepb\252\002&Google.Clo" - + "ud.Spanner.Admin.Database.V1\312\002&Google\\Cl" - + "oud\\Spanner\\Admin\\Database\\V1\352\002+Google::" - + "Cloud::Spanner::Admin::Database::V1b\006pro" - + "to3" + + "ance\022\016\n\006filter\030\002 \001(\t\022\021\n\tpage_size\030\003 \001(\005\022" + + "\022\n\npage_token\030\004 \001(\t\"j\n\034ListBackupOperati" + + "onsResponse\0221\n\noperations\030\001 \003(\0132\035.google" + + ".longrunning.Operation\022\027\n\017next_page_toke" + + "n\030\002 \001(\t\"\342\001\n\nBackupInfo\0222\n\006backup\030\001 \001(\tB\"" + + "\372A\037\n\035spanner.googleapis.com/Backup\0220\n\014ve" + + "rsion_time\030\004 \001(\0132\032.google.protobuf.Times" + + 
"tamp\022/\n\013create_time\030\002 \001(\0132\032.google.proto" + + "buf.Timestamp\022=\n\017source_database\030\003 \001(\tB$" + + "\372A!\n\037spanner.googleapis.com/Database\"\237\003\n" + + "\034CreateBackupEncryptionConfig\022k\n\017encrypt" + + "ion_type\030\001 \001(\0162M.google.spanner.admin.da" + + "tabase.v1.CreateBackupEncryptionConfig.E" + + "ncryptionTypeB\003\340A\002\022?\n\014kms_key_name\030\002 \001(\t" + + "B)\340A\001\372A#\n!cloudkms.googleapis.com/Crypto" + + "Key\022@\n\rkms_key_names\030\003 \003(\tB)\340A\001\372A#\n!clou" + + "dkms.googleapis.com/CryptoKey\"\216\001\n\016Encryp" + + "tionType\022\037\n\033ENCRYPTION_TYPE_UNSPECIFIED\020" + + "\000\022\033\n\027USE_DATABASE_ENCRYPTION\020\001\022\035\n\031GOOGLE" + + "_DEFAULT_ENCRYPTION\020\002\022\037\n\033CUSTOMER_MANAGE" + + "D_ENCRYPTION\020\003\"\253\003\n\032CopyBackupEncryptionC" + + "onfig\022i\n\017encryption_type\030\001 \001(\0162K.google." + + "spanner.admin.database.v1.CopyBackupEncr" + + "yptionConfig.EncryptionTypeB\003\340A\002\022?\n\014kms_" + + "key_name\030\002 \001(\tB)\340A\001\372A#\n!cloudkms.googlea" + + "pis.com/CryptoKey\022@\n\rkms_key_names\030\003 \003(\t" + + "B)\340A\001\372A#\n!cloudkms.googleapis.com/Crypto" + + "Key\"\236\001\n\016EncryptionType\022\037\n\033ENCRYPTION_TYP" + + "E_UNSPECIFIED\020\000\022+\n\'USE_CONFIG_DEFAULT_OR" + + "_BACKUP_ENCRYPTION\020\001\022\035\n\031GOOGLE_DEFAULT_E" + + "NCRYPTION\020\002\022\037\n\033CUSTOMER_MANAGED_ENCRYPTI" + + "ON\020\003\"\020\n\016FullBackupSpec\"\027\n\025IncrementalBac" + + "kupSpecB\375\001\n$com.google.spanner.admin.dat" + + "abase.v1B\013BackupProtoP\001ZFcloud.google.co" + + "m/go/spanner/admin/database/apiv1/databa" + + "sepb;databasepb\252\002&Google.Cloud.Spanner.A" + + "dmin.Database.V1\312\002&Google\\Cloud\\Spanner\\" + + "Admin\\Database\\V1\352\002+Google::Cloud::Spann" + + "er::Admin::Database::V1b\006proto3" }; descriptor = 
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -227,12 +247,18 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Name", "CreateTime", "SizeBytes", + "FreeableSizeBytes", + "ExclusiveSizeBytes", "State", "ReferencingDatabases", "EncryptionInfo", + "EncryptionInformation", "DatabaseDialect", "ReferencingBackups", "MaxExpireTime", + "BackupSchedules", + "IncrementalBackupChainId", + "OldestVersionTime", }); internal_static_google_spanner_admin_database_v1_CreateBackupRequest_descriptor = getDescriptor().getMessageTypes().get(1); @@ -336,7 +362,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_database_v1_CreateBackupEncryptionConfig_descriptor, new java.lang.String[] { - "EncryptionType", "KmsKeyName", + "EncryptionType", "KmsKeyName", "KmsKeyNames", }); internal_static_google_spanner_admin_database_v1_CopyBackupEncryptionConfig_descriptor = getDescriptor().getMessageTypes().get(14); @@ -344,8 +370,20 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_database_v1_CopyBackupEncryptionConfig_descriptor, new java.lang.String[] { - "EncryptionType", "KmsKeyName", + "EncryptionType", "KmsKeyName", "KmsKeyNames", }); + internal_static_google_spanner_admin_database_v1_FullBackupSpec_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_google_spanner_admin_database_v1_FullBackupSpec_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_FullBackupSpec_descriptor, + new java.lang.String[] {}); + internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_descriptor = + getDescriptor().getMessageTypes().get(16); + 
internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_descriptor, + new java.lang.String[] {}); com.google.protobuf.ExtensionRegistry registry = com.google.protobuf.ExtensionRegistry.newInstance(); registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupSchedule.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupSchedule.java new file mode 100644 index 00000000000..71c543a23f1 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupSchedule.java @@ -0,0 +1,2652 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * BackupSchedule expresses the automated backup creation specification for a
    + * Spanner database.
    + * Next ID: 10
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.BackupSchedule} + */ +public final class BackupSchedule extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.BackupSchedule) + BackupScheduleOrBuilder { + private static final long serialVersionUID = 0L; + // Use BackupSchedule.newBuilder() to construct. + private BackupSchedule(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BackupSchedule() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BackupSchedule(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupSchedule_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupSchedule_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.BackupSchedule.class, + com.google.spanner.admin.database.v1.BackupSchedule.Builder.class); + } + + private int bitField0_; + private int backupTypeSpecCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object backupTypeSpec_; + + public enum BackupTypeSpecCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + FULL_BACKUP_SPEC(7), + INCREMENTAL_BACKUP_SPEC(8), + BACKUPTYPESPEC_NOT_SET(0); + private final int value; + + private BackupTypeSpecCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. 
+ * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static BackupTypeSpecCase valueOf(int value) { + return forNumber(value); + } + + public static BackupTypeSpecCase forNumber(int value) { + switch (value) { + case 7: + return FULL_BACKUP_SPEC; + case 8: + return INCREMENTAL_BACKUP_SPEC; + case 0: + return BACKUPTYPESPEC_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public BackupTypeSpecCase getBackupTypeSpecCase() { + return BackupTypeSpecCase.forNumber(backupTypeSpecCase_); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * + * + *
    +   * Identifier. Output only for the
    +   * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation.
    +   * Required for the
    +   * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +   * operation. A globally unique identifier for the backup schedule which
    +   * cannot be changed. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +   * The final segment of the name must be between 2 and 60 characters in
    +   * length.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
    +   * Identifier. Output only for the
    +   * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation.
    +   * Required for the
    +   * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +   * operation. A globally unique identifier for the backup schedule which
    +   * cannot be changed. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +   * The final segment of the name must be between 2 and 60 characters in
    +   * length.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SPEC_FIELD_NUMBER = 6; + private com.google.spanner.admin.database.v1.BackupScheduleSpec spec_; + /** + * + * + *
    +   * Optional. The schedule specification based on which the backup creations
    +   * are triggered.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the spec field is set. + */ + @java.lang.Override + public boolean hasSpec() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
    +   * Optional. The schedule specification based on which the backup creations
    +   * are triggered.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The spec. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleSpec getSpec() { + return spec_ == null + ? com.google.spanner.admin.database.v1.BackupScheduleSpec.getDefaultInstance() + : spec_; + } + /** + * + * + *
    +   * Optional. The schedule specification based on which the backup creations
    +   * are triggered.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleSpecOrBuilder getSpecOrBuilder() { + return spec_ == null + ? com.google.spanner.admin.database.v1.BackupScheduleSpec.getDefaultInstance() + : spec_; + } + + public static final int RETENTION_DURATION_FIELD_NUMBER = 3; + private com.google.protobuf.Duration retentionDuration_; + /** + * + * + *
    +   * Optional. The retention duration of a backup that must be at least 6 hours
    +   * and at most 366 days. The backup is eligible to be automatically deleted
    +   * once the retention period has elapsed.
    +   * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retentionDuration field is set. + */ + @java.lang.Override + public boolean hasRetentionDuration() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + *
    +   * Optional. The retention duration of a backup that must be at least 6 hours
    +   * and at most 366 days. The backup is eligible to be automatically deleted
    +   * once the retention period has elapsed.
    +   * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retentionDuration. + */ + @java.lang.Override + public com.google.protobuf.Duration getRetentionDuration() { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } + /** + * + * + *
    +   * Optional. The retention duration of a backup that must be at least 6 hours
    +   * and at most 366 days. The backup is eligible to be automatically deleted
    +   * once the retention period has elapsed.
    +   * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getRetentionDurationOrBuilder() { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } + + public static final int ENCRYPTION_CONFIG_FIELD_NUMBER = 4; + private com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryptionConfig_; + /** + * + * + *
    +   * Optional. The encryption configuration that will be used to encrypt the
    +   * backup. If this field is not specified, the backup will use the same
    +   * encryption configuration as the database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + @java.lang.Override + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * + * + *
    +   * Optional. The encryption configuration that will be used to encrypt the
    +   * backup. If this field is not specified, the backup will use the same
    +   * encryption configuration as the database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig getEncryptionConfig() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + /** + * + * + *
    +   * Optional. The encryption configuration that will be used to encrypt the
    +   * backup. If this field is not specified, the backup will use the same
    +   * encryption configuration as the database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + + public static final int FULL_BACKUP_SPEC_FIELD_NUMBER = 7; + /** + * + * + *
    +   * The schedule creates only full backups.
    +   * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + * + * @return Whether the fullBackupSpec field is set. + */ + @java.lang.Override + public boolean hasFullBackupSpec() { + return backupTypeSpecCase_ == 7; + } + /** + * + * + *
    +   * The schedule creates only full backups.
    +   * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + * + * @return The fullBackupSpec. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpec getFullBackupSpec() { + if (backupTypeSpecCase_ == 7) { + return (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance(); + } + /** + * + * + *
    +   * The schedule creates only full backups.
    +   * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpecOrBuilder getFullBackupSpecOrBuilder() { + if (backupTypeSpecCase_ == 7) { + return (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance(); + } + + public static final int INCREMENTAL_BACKUP_SPEC_FIELD_NUMBER = 8; + /** + * + * + *
    +   * The schedule creates incremental backup chains.
    +   * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + * + * @return Whether the incrementalBackupSpec field is set. + */ + @java.lang.Override + public boolean hasIncrementalBackupSpec() { + return backupTypeSpecCase_ == 8; + } + /** + * + * + *
    +   * The schedule creates incremental backup chains.
    +   * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + * + * @return The incrementalBackupSpec. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpec getIncrementalBackupSpec() { + if (backupTypeSpecCase_ == 8) { + return (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance(); + } + /** + * + * + *
    +   * The schedule creates incremental backup chains.
    +   * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder + getIncrementalBackupSpecOrBuilder() { + if (backupTypeSpecCase_ == 8) { + return (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance(); + } + + public static final int UPDATE_TIME_FIELD_NUMBER = 9; + private com.google.protobuf.Timestamp updateTime_; + /** + * + * + *
    +   * Output only. The timestamp at which the schedule was last updated.
    +   * If the schedule has never been updated, this field contains the timestamp
    +   * when the schedule was first created.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + @java.lang.Override + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000008) != 0); + } + /** + * + * + *
    +   * Output only. The timestamp at which the schedule was last updated.
    +   * If the schedule has never been updated, this field contains the timestamp
    +   * when the schedule was first created.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getUpdateTime() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + /** + * + * + *
    +   * Output only. The timestamp at which the schedule was last updated.
    +   * If the schedule has never been updated, this field contains the timestamp
    +   * when the schedule was first created.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getRetentionDuration()); + } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(4, getEncryptionConfig()); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(6, getSpec()); + } + if (backupTypeSpecCase_ == 7) { + output.writeMessage(7, (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_); + } + if (backupTypeSpecCase_ == 8) { + output.writeMessage( + 8, (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeMessage(9, getUpdateTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getRetentionDuration()); + } + if (((bitField0_ & 
0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getEncryptionConfig()); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getSpec()); + } + if (backupTypeSpecCase_ == 7) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 7, (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_); + } + if (backupTypeSpecCase_ == 8) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 8, (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, getUpdateTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.BackupSchedule)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.BackupSchedule other = + (com.google.spanner.admin.database.v1.BackupSchedule) obj; + + if (!getName().equals(other.getName())) return false; + if (hasSpec() != other.hasSpec()) return false; + if (hasSpec()) { + if (!getSpec().equals(other.getSpec())) return false; + } + if (hasRetentionDuration() != other.hasRetentionDuration()) return false; + if (hasRetentionDuration()) { + if (!getRetentionDuration().equals(other.getRetentionDuration())) return false; + } + if (hasEncryptionConfig() != other.hasEncryptionConfig()) return false; + if (hasEncryptionConfig()) { + if (!getEncryptionConfig().equals(other.getEncryptionConfig())) return false; + } + if (hasUpdateTime() != other.hasUpdateTime()) return false; + if (hasUpdateTime()) { + if (!getUpdateTime().equals(other.getUpdateTime())) return false; + } + if 
(!getBackupTypeSpecCase().equals(other.getBackupTypeSpecCase())) return false; + switch (backupTypeSpecCase_) { + case 7: + if (!getFullBackupSpec().equals(other.getFullBackupSpec())) return false; + break; + case 8: + if (!getIncrementalBackupSpec().equals(other.getIncrementalBackupSpec())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasSpec()) { + hash = (37 * hash) + SPEC_FIELD_NUMBER; + hash = (53 * hash) + getSpec().hashCode(); + } + if (hasRetentionDuration()) { + hash = (37 * hash) + RETENTION_DURATION_FIELD_NUMBER; + hash = (53 * hash) + getRetentionDuration().hashCode(); + } + if (hasEncryptionConfig()) { + hash = (37 * hash) + ENCRYPTION_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getEncryptionConfig().hashCode(); + } + if (hasUpdateTime()) { + hash = (37 * hash) + UPDATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getUpdateTime().hashCode(); + } + switch (backupTypeSpecCase_) { + case 7: + hash = (37 * hash) + FULL_BACKUP_SPEC_FIELD_NUMBER; + hash = (53 * hash) + getFullBackupSpec().hashCode(); + break; + case 8: + hash = (37 * hash) + INCREMENTAL_BACKUP_SPEC_FIELD_NUMBER; + hash = (53 * hash) + getIncrementalBackupSpec().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + java.nio.ByteBuffer data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static 
com.google.spanner.admin.database.v1.BackupSchedule parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.database.v1.BackupSchedule prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * BackupSchedule expresses the automated backup creation specification for a
    +   * Spanner database.
    +   * Next ID: 10
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.BackupSchedule} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.BackupSchedule) + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupSchedule_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupSchedule_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.BackupSchedule.class, + com.google.spanner.admin.database.v1.BackupSchedule.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.BackupSchedule.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getSpecFieldBuilder(); + getRetentionDurationFieldBuilder(); + getEncryptionConfigFieldBuilder(); + getUpdateTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + spec_ = null; + if (specBuilder_ != null) { + specBuilder_.dispose(); + specBuilder_ = null; + } + retentionDuration_ = null; + if (retentionDurationBuilder_ != null) { + retentionDurationBuilder_.dispose(); + retentionDurationBuilder_ = null; + } + encryptionConfig_ = null; + if 
(encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + if (fullBackupSpecBuilder_ != null) { + fullBackupSpecBuilder_.clear(); + } + if (incrementalBackupSpecBuilder_ != null) { + incrementalBackupSpecBuilder_.clear(); + } + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + backupTypeSpecCase_ = 0; + backupTypeSpec_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupSchedule_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupSchedule getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupSchedule build() { + com.google.spanner.admin.database.v1.BackupSchedule result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupSchedule buildPartial() { + com.google.spanner.admin.database.v1.BackupSchedule result = + new com.google.spanner.admin.database.v1.BackupSchedule(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.BackupSchedule result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.spec_ = specBuilder_ == null ? 
spec_ : specBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.retentionDuration_ = + retentionDurationBuilder_ == null + ? retentionDuration_ + : retentionDurationBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.encryptionConfig_ = + encryptionConfigBuilder_ == null ? encryptionConfig_ : encryptionConfigBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.updateTime_ = updateTimeBuilder_ == null ? updateTime_ : updateTimeBuilder_.build(); + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs(com.google.spanner.admin.database.v1.BackupSchedule result) { + result.backupTypeSpecCase_ = backupTypeSpecCase_; + result.backupTypeSpec_ = this.backupTypeSpec_; + if (backupTypeSpecCase_ == 7 && fullBackupSpecBuilder_ != null) { + result.backupTypeSpec_ = fullBackupSpecBuilder_.build(); + } + if (backupTypeSpecCase_ == 8 && incrementalBackupSpecBuilder_ != null) { + result.backupTypeSpec_ = incrementalBackupSpecBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.BackupSchedule) { + return mergeFrom((com.google.spanner.admin.database.v1.BackupSchedule) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.BackupSchedule other) { + if (other == com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasSpec()) { + mergeSpec(other.getSpec()); + } + if (other.hasRetentionDuration()) { + mergeRetentionDuration(other.getRetentionDuration()); + } + if (other.hasEncryptionConfig()) { + mergeEncryptionConfig(other.getEncryptionConfig()); + } + if (other.hasUpdateTime()) { + mergeUpdateTime(other.getUpdateTime()); + } + switch (other.getBackupTypeSpecCase()) { + case FULL_BACKUP_SPEC: + { + mergeFullBackupSpec(other.getFullBackupSpec()); + break; + } + case INCREMENTAL_BACKUP_SPEC: + { + mergeIncrementalBackupSpec(other.getIncrementalBackupSpec()); + break; + } + case BACKUPTYPESPEC_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = 
input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 26: + { + input.readMessage( + getRetentionDurationFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 34: + { + input.readMessage( + getEncryptionConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 + case 50: + { + input.readMessage(getSpecFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 50 + case 58: + { + input.readMessage(getFullBackupSpecFieldBuilder().getBuilder(), extensionRegistry); + backupTypeSpecCase_ = 7; + break; + } // case 58 + case 66: + { + input.readMessage( + getIncrementalBackupSpecFieldBuilder().getBuilder(), extensionRegistry); + backupTypeSpecCase_ = 8; + break; + } // case 66 + case 74: + { + input.readMessage(getUpdateTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000040; + break; + } // case 74 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int backupTypeSpecCase_ = 0; + private java.lang.Object backupTypeSpec_; + + public BackupTypeSpecCase getBackupTypeSpecCase() { + return BackupTypeSpecCase.forNumber(backupTypeSpecCase_); + } + + public Builder clearBackupTypeSpec() { + backupTypeSpecCase_ = 0; + backupTypeSpec_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * + * + *
    +     * Identifier. Output only for the
    +     * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation.
    +     * Required for the
    +     * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +     * operation. A globally unique identifier for the backup schedule which
    +     * cannot be changed. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +     * The final segment of the name must be between 2 and 60 characters in
    +     * length.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * Identifier. Output only for the
    +     * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation.
    +     * Required for the
    +     * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +     * operation. A globally unique identifier for the backup schedule which
    +     * cannot be changed. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +     * The final segment of the name must be between 2 and 60 characters in
    +     * length.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * Identifier. Output only for the
    +     * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation.
    +     * Required for the
    +     * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +     * operation. A globally unique identifier for the backup schedule which
    +     * cannot be changed. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +     * The final segment of the name must be between 2 and 60 characters in
    +     * length.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +     * Identifier. Output only for the
    +     * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation.
    +     * Required for the
    +     * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +     * operation. A globally unique identifier for the backup schedule which
    +     * cannot be changed. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +     * The final segment of the name must be between 2 and 60 characters in
    +     * length.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
    +     * Identifier. Output only for the
    +     * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation.
    +     * Required for the
    +     * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +     * operation. A globally unique identifier for the backup schedule which
    +     * cannot be changed. Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +     * The final segment of the name must be between 2 and 60 characters in
    +     * length.
    +     * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.admin.database.v1.BackupScheduleSpec spec_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.BackupScheduleSpec, + com.google.spanner.admin.database.v1.BackupScheduleSpec.Builder, + com.google.spanner.admin.database.v1.BackupScheduleSpecOrBuilder> + specBuilder_; + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the spec field is set. + */ + public boolean hasSpec() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The spec. + */ + public com.google.spanner.admin.database.v1.BackupScheduleSpec getSpec() { + if (specBuilder_ == null) { + return spec_ == null + ? com.google.spanner.admin.database.v1.BackupScheduleSpec.getDefaultInstance() + : spec_; + } else { + return specBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSpec(com.google.spanner.admin.database.v1.BackupScheduleSpec value) { + if (specBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + spec_ = value; + } else { + specBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setSpec( + com.google.spanner.admin.database.v1.BackupScheduleSpec.Builder builderForValue) { + if (specBuilder_ == null) { + spec_ = builderForValue.build(); + } else { + specBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeSpec(com.google.spanner.admin.database.v1.BackupScheduleSpec value) { + if (specBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && spec_ != null + && spec_ + != com.google.spanner.admin.database.v1.BackupScheduleSpec.getDefaultInstance()) { + getSpecBuilder().mergeFrom(value); + } else { + spec_ = value; + } + } else { + specBuilder_.mergeFrom(value); + } + if (spec_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearSpec() { + bitField0_ = (bitField0_ & ~0x00000002); + spec_ = null; + if (specBuilder_ != null) { + specBuilder_.dispose(); + specBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.BackupScheduleSpec.Builder getSpecBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getSpecFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.BackupScheduleSpecOrBuilder getSpecOrBuilder() { + if (specBuilder_ != null) { + return specBuilder_.getMessageOrBuilder(); + } else { + return spec_ == null + ? com.google.spanner.admin.database.v1.BackupScheduleSpec.getDefaultInstance() + : spec_; + } + } + /** + * + * + *
    +     * Optional. The schedule specification based on which the backup creations
    +     * are triggered.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.BackupScheduleSpec, + com.google.spanner.admin.database.v1.BackupScheduleSpec.Builder, + com.google.spanner.admin.database.v1.BackupScheduleSpecOrBuilder> + getSpecFieldBuilder() { + if (specBuilder_ == null) { + specBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.BackupScheduleSpec, + com.google.spanner.admin.database.v1.BackupScheduleSpec.Builder, + com.google.spanner.admin.database.v1.BackupScheduleSpecOrBuilder>( + getSpec(), getParentForChildren(), isClean()); + spec_ = null; + } + return specBuilder_; + } + + private com.google.protobuf.Duration retentionDuration_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + retentionDurationBuilder_; + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retentionDuration field is set. + */ + public boolean hasRetentionDuration() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retentionDuration. + */ + public com.google.protobuf.Duration getRetentionDuration() { + if (retentionDurationBuilder_ == null) { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } else { + return retentionDurationBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRetentionDuration(com.google.protobuf.Duration value) { + if (retentionDurationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + retentionDuration_ = value; + } else { + retentionDurationBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setRetentionDuration(com.google.protobuf.Duration.Builder builderForValue) { + if (retentionDurationBuilder_ == null) { + retentionDuration_ = builderForValue.build(); + } else { + retentionDurationBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeRetentionDuration(com.google.protobuf.Duration value) { + if (retentionDurationBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && retentionDuration_ != null + && retentionDuration_ != com.google.protobuf.Duration.getDefaultInstance()) { + getRetentionDurationBuilder().mergeFrom(value); + } else { + retentionDuration_ = value; + } + } else { + retentionDurationBuilder_.mergeFrom(value); + } + if (retentionDuration_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearRetentionDuration() { + bitField0_ = (bitField0_ & ~0x00000004); + retentionDuration_ = null; + if (retentionDurationBuilder_ != null) { + retentionDurationBuilder_.dispose(); + retentionDurationBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Duration.Builder getRetentionDurationBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getRetentionDurationFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.DurationOrBuilder getRetentionDurationOrBuilder() { + if (retentionDurationBuilder_ != null) { + return retentionDurationBuilder_.getMessageOrBuilder(); + } else { + return retentionDuration_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : retentionDuration_; + } + } + /** + * + * + *
    +     * Optional. The retention duration of a backup that must be at least 6 hours
    +     * and at most 366 days. The backup is eligible to be automatically deleted
    +     * once the retention period has elapsed.
    +     * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + getRetentionDurationFieldBuilder() { + if (retentionDurationBuilder_ == null) { + retentionDurationBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + getRetentionDuration(), getParentForChildren(), isClean()); + retentionDuration_ = null; + } + return retentionDurationBuilder_; + } + + private com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryptionConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder> + encryptionConfigBuilder_; + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + public boolean hasEncryptionConfig() { + return ((bitField0_ & 0x00000008) != 0); + } + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig getEncryptionConfig() { + if (encryptionConfigBuilder_ == null) { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } else { + return encryptionConfigBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + encryptionConfig_ = value; + } else { + encryptionConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setEncryptionConfig( + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder builderForValue) { + if (encryptionConfigBuilder_ == null) { + encryptionConfig_ = builderForValue.build(); + } else { + encryptionConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeEncryptionConfig( + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig value) { + if (encryptionConfigBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && encryptionConfig_ != null + && encryptionConfig_ + != com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig + .getDefaultInstance()) { + getEncryptionConfigBuilder().mergeFrom(value); + } else { + encryptionConfig_ = value; + } + } else { + encryptionConfigBuilder_.mergeFrom(value); + } + if (encryptionConfig_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearEncryptionConfig() { + bitField0_ = (bitField0_ & ~0x00000008); + encryptionConfig_ = null; + if (encryptionConfigBuilder_ != null) { + encryptionConfigBuilder_.dispose(); + encryptionConfigBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder + getEncryptionConfigBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getEncryptionConfigFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder + getEncryptionConfigOrBuilder() { + if (encryptionConfigBuilder_ != null) { + return encryptionConfigBuilder_.getMessageOrBuilder(); + } else { + return encryptionConfig_ == null + ? com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.getDefaultInstance() + : encryptionConfig_; + } + } + /** + * + * + *
    +     * Optional. The encryption configuration that will be used to encrypt the
    +     * backup. If this field is not specified, the backup will use the same
    +     * encryption configuration as the database.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder> + getEncryptionConfigFieldBuilder() { + if (encryptionConfigBuilder_ == null) { + encryptionConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig.Builder, + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder>( + getEncryptionConfig(), getParentForChildren(), isClean()); + encryptionConfig_ = null; + } + return encryptionConfigBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.FullBackupSpec, + com.google.spanner.admin.database.v1.FullBackupSpec.Builder, + com.google.spanner.admin.database.v1.FullBackupSpecOrBuilder> + fullBackupSpecBuilder_; + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + * + * @return Whether the fullBackupSpec field is set. + */ + @java.lang.Override + public boolean hasFullBackupSpec() { + return backupTypeSpecCase_ == 7; + } + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + * + * @return The fullBackupSpec. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpec getFullBackupSpec() { + if (fullBackupSpecBuilder_ == null) { + if (backupTypeSpecCase_ == 7) { + return (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance(); + } else { + if (backupTypeSpecCase_ == 7) { + return fullBackupSpecBuilder_.getMessage(); + } + return com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance(); + } + } + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + public Builder setFullBackupSpec(com.google.spanner.admin.database.v1.FullBackupSpec value) { + if (fullBackupSpecBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + backupTypeSpec_ = value; + onChanged(); + } else { + fullBackupSpecBuilder_.setMessage(value); + } + backupTypeSpecCase_ = 7; + return this; + } + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + public Builder setFullBackupSpec( + com.google.spanner.admin.database.v1.FullBackupSpec.Builder builderForValue) { + if (fullBackupSpecBuilder_ == null) { + backupTypeSpec_ = builderForValue.build(); + onChanged(); + } else { + fullBackupSpecBuilder_.setMessage(builderForValue.build()); + } + backupTypeSpecCase_ = 7; + return this; + } + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + public Builder mergeFullBackupSpec(com.google.spanner.admin.database.v1.FullBackupSpec value) { + if (fullBackupSpecBuilder_ == null) { + if (backupTypeSpecCase_ == 7 + && backupTypeSpec_ + != com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance()) { + backupTypeSpec_ = + com.google.spanner.admin.database.v1.FullBackupSpec.newBuilder( + (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_) + .mergeFrom(value) + .buildPartial(); + } else { + backupTypeSpec_ = value; + } + onChanged(); + } else { + if (backupTypeSpecCase_ == 7) { + fullBackupSpecBuilder_.mergeFrom(value); + } else { + fullBackupSpecBuilder_.setMessage(value); + } + } + backupTypeSpecCase_ = 7; + return this; + } + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + public Builder clearFullBackupSpec() { + if (fullBackupSpecBuilder_ == null) { + if (backupTypeSpecCase_ == 7) { + backupTypeSpecCase_ = 0; + backupTypeSpec_ = null; + onChanged(); + } + } else { + if (backupTypeSpecCase_ == 7) { + backupTypeSpecCase_ = 0; + backupTypeSpec_ = null; + } + fullBackupSpecBuilder_.clear(); + } + return this; + } + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + public com.google.spanner.admin.database.v1.FullBackupSpec.Builder getFullBackupSpecBuilder() { + return getFullBackupSpecFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpecOrBuilder + getFullBackupSpecOrBuilder() { + if ((backupTypeSpecCase_ == 7) && (fullBackupSpecBuilder_ != null)) { + return fullBackupSpecBuilder_.getMessageOrBuilder(); + } else { + if (backupTypeSpecCase_ == 7) { + return (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance(); + } + } + /** + * + * + *
    +     * The schedule creates only full backups.
    +     * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.FullBackupSpec, + com.google.spanner.admin.database.v1.FullBackupSpec.Builder, + com.google.spanner.admin.database.v1.FullBackupSpecOrBuilder> + getFullBackupSpecFieldBuilder() { + if (fullBackupSpecBuilder_ == null) { + if (!(backupTypeSpecCase_ == 7)) { + backupTypeSpec_ = + com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance(); + } + fullBackupSpecBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.FullBackupSpec, + com.google.spanner.admin.database.v1.FullBackupSpec.Builder, + com.google.spanner.admin.database.v1.FullBackupSpecOrBuilder>( + (com.google.spanner.admin.database.v1.FullBackupSpec) backupTypeSpec_, + getParentForChildren(), + isClean()); + backupTypeSpec_ = null; + } + backupTypeSpecCase_ = 7; + onChanged(); + return fullBackupSpecBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.IncrementalBackupSpec, + com.google.spanner.admin.database.v1.IncrementalBackupSpec.Builder, + com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder> + incrementalBackupSpecBuilder_; + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + * + * @return Whether the incrementalBackupSpec field is set. + */ + @java.lang.Override + public boolean hasIncrementalBackupSpec() { + return backupTypeSpecCase_ == 8; + } + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + * + * @return The incrementalBackupSpec. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpec getIncrementalBackupSpec() { + if (incrementalBackupSpecBuilder_ == null) { + if (backupTypeSpecCase_ == 8) { + return (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance(); + } else { + if (backupTypeSpecCase_ == 8) { + return incrementalBackupSpecBuilder_.getMessage(); + } + return com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance(); + } + } + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + public Builder setIncrementalBackupSpec( + com.google.spanner.admin.database.v1.IncrementalBackupSpec value) { + if (incrementalBackupSpecBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + backupTypeSpec_ = value; + onChanged(); + } else { + incrementalBackupSpecBuilder_.setMessage(value); + } + backupTypeSpecCase_ = 8; + return this; + } + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + public Builder setIncrementalBackupSpec( + com.google.spanner.admin.database.v1.IncrementalBackupSpec.Builder builderForValue) { + if (incrementalBackupSpecBuilder_ == null) { + backupTypeSpec_ = builderForValue.build(); + onChanged(); + } else { + incrementalBackupSpecBuilder_.setMessage(builderForValue.build()); + } + backupTypeSpecCase_ = 8; + return this; + } + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + public Builder mergeIncrementalBackupSpec( + com.google.spanner.admin.database.v1.IncrementalBackupSpec value) { + if (incrementalBackupSpecBuilder_ == null) { + if (backupTypeSpecCase_ == 8 + && backupTypeSpec_ + != com.google.spanner.admin.database.v1.IncrementalBackupSpec + .getDefaultInstance()) { + backupTypeSpec_ = + com.google.spanner.admin.database.v1.IncrementalBackupSpec.newBuilder( + (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_) + .mergeFrom(value) + .buildPartial(); + } else { + backupTypeSpec_ = value; + } + onChanged(); + } else { + if (backupTypeSpecCase_ == 8) { + incrementalBackupSpecBuilder_.mergeFrom(value); + } else { + incrementalBackupSpecBuilder_.setMessage(value); + } + } + backupTypeSpecCase_ = 8; + return this; + } + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + public Builder clearIncrementalBackupSpec() { + if (incrementalBackupSpecBuilder_ == null) { + if (backupTypeSpecCase_ == 8) { + backupTypeSpecCase_ = 0; + backupTypeSpec_ = null; + onChanged(); + } + } else { + if (backupTypeSpecCase_ == 8) { + backupTypeSpecCase_ = 0; + backupTypeSpec_ = null; + } + incrementalBackupSpecBuilder_.clear(); + } + return this; + } + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + public com.google.spanner.admin.database.v1.IncrementalBackupSpec.Builder + getIncrementalBackupSpecBuilder() { + return getIncrementalBackupSpecFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder + getIncrementalBackupSpecOrBuilder() { + if ((backupTypeSpecCase_ == 8) && (incrementalBackupSpecBuilder_ != null)) { + return incrementalBackupSpecBuilder_.getMessageOrBuilder(); + } else { + if (backupTypeSpecCase_ == 8) { + return (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_; + } + return com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance(); + } + } + /** + * + * + *
    +     * The schedule creates incremental backup chains.
    +     * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.IncrementalBackupSpec, + com.google.spanner.admin.database.v1.IncrementalBackupSpec.Builder, + com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder> + getIncrementalBackupSpecFieldBuilder() { + if (incrementalBackupSpecBuilder_ == null) { + if (!(backupTypeSpecCase_ == 8)) { + backupTypeSpec_ = + com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance(); + } + incrementalBackupSpecBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.IncrementalBackupSpec, + com.google.spanner.admin.database.v1.IncrementalBackupSpec.Builder, + com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder>( + (com.google.spanner.admin.database.v1.IncrementalBackupSpec) backupTypeSpec_, + getParentForChildren(), + isClean()); + backupTypeSpec_ = null; + } + backupTypeSpecCase_ = 8; + onChanged(); + return incrementalBackupSpecBuilder_; + } + + private com.google.protobuf.Timestamp updateTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + updateTimeBuilder_; + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + public boolean hasUpdateTime() { + return ((bitField0_ & 0x00000040) != 0); + } + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + public com.google.protobuf.Timestamp getUpdateTime() { + if (updateTimeBuilder_ == null) { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } else { + return updateTimeBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateTime_ = value; + } else { + updateTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setUpdateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (updateTimeBuilder_ == null) { + updateTime_ = builderForValue.build(); + } else { + updateTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { + if (updateTimeBuilder_ == null) { + if (((bitField0_ & 0x00000040) != 0) + && updateTime_ != null + && updateTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getUpdateTimeBuilder().mergeFrom(value); + } else { + updateTime_ = value; + } + } else { + updateTimeBuilder_.mergeFrom(value); + } + if (updateTime_ != null) { + bitField0_ |= 0x00000040; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearUpdateTime() { + bitField0_ = (bitField0_ & ~0x00000040); + updateTime_ = null; + if (updateTimeBuilder_ != null) { + updateTimeBuilder_.dispose(); + updateTimeBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getUpdateTimeBuilder() { + bitField0_ |= 0x00000040; + onChanged(); + return getUpdateTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { + if (updateTimeBuilder_ != null) { + return updateTimeBuilder_.getMessageOrBuilder(); + } else { + return updateTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : updateTime_; + } + } + /** + * + * + *
    +     * Output only. The timestamp at which the schedule was last updated.
    +     * If the schedule has never been updated, this field contains the timestamp
    +     * when the schedule was first created.
    +     * 
    + * + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getUpdateTimeFieldBuilder() { + if (updateTimeBuilder_ == null) { + updateTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getUpdateTime(), getParentForChildren(), isClean()); + updateTime_ = null; + } + return updateTimeBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.BackupSchedule) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.BackupSchedule) + private static final com.google.spanner.admin.database.v1.BackupSchedule DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.BackupSchedule(); + } + + public static com.google.spanner.admin.database.v1.BackupSchedule getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BackupSchedule parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupSchedule getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleName.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleName.java new file mode 100644 index 00000000000..4ab35282678 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleName.java @@ -0,0 +1,261 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.spanner.admin.database.v1; + +import com.google.api.pathtemplate.PathTemplate; +import com.google.api.resourcenames.ResourceName; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS. +@Generated("by gapic-generator-java") +public class BackupScheduleName implements ResourceName { + private static final PathTemplate PROJECT_INSTANCE_DATABASE_SCHEDULE = + PathTemplate.createWithoutUrlEncoding( + "projects/{project}/instances/{instance}/databases/{database}/backupSchedules/{schedule}"); + private volatile Map fieldValuesMap; + private final String project; + private final String instance; + private final String database; + private final String schedule; + + @Deprecated + protected BackupScheduleName() { + project = null; + instance = null; + database = null; + schedule = null; + } + + private BackupScheduleName(Builder builder) { + project = Preconditions.checkNotNull(builder.getProject()); + instance = Preconditions.checkNotNull(builder.getInstance()); + database = Preconditions.checkNotNull(builder.getDatabase()); + schedule = Preconditions.checkNotNull(builder.getSchedule()); + } + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public String getDatabase() { + return database; + } + + public String getSchedule() { + return schedule; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static BackupScheduleName of( + String project, String instance, String database, String schedule) { + return newBuilder() + .setProject(project) + .setInstance(instance) + .setDatabase(database) + .setSchedule(schedule) + .build(); + } + + public static String format(String 
project, String instance, String database, String schedule) { + return newBuilder() + .setProject(project) + .setInstance(instance) + .setDatabase(database) + .setSchedule(schedule) + .build() + .toString(); + } + + public static BackupScheduleName parse(String formattedString) { + if (formattedString.isEmpty()) { + return null; + } + Map matchMap = + PROJECT_INSTANCE_DATABASE_SCHEDULE.validatedMatch( + formattedString, "BackupScheduleName.parse: formattedString not in valid format"); + return of( + matchMap.get("project"), + matchMap.get("instance"), + matchMap.get("database"), + matchMap.get("schedule")); + } + + public static List parseList(List formattedStrings) { + List list = new ArrayList<>(formattedStrings.size()); + for (String formattedString : formattedStrings) { + list.add(parse(formattedString)); + } + return list; + } + + public static List toStringList(List values) { + List list = new ArrayList<>(values.size()); + for (BackupScheduleName value : values) { + if (value == null) { + list.add(""); + } else { + list.add(value.toString()); + } + } + return list; + } + + public static boolean isParsableFrom(String formattedString) { + return PROJECT_INSTANCE_DATABASE_SCHEDULE.matches(formattedString); + } + + @Override + public Map getFieldValuesMap() { + if (fieldValuesMap == null) { + synchronized (this) { + if (fieldValuesMap == null) { + ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); + if (project != null) { + fieldMapBuilder.put("project", project); + } + if (instance != null) { + fieldMapBuilder.put("instance", instance); + } + if (database != null) { + fieldMapBuilder.put("database", database); + } + if (schedule != null) { + fieldMapBuilder.put("schedule", schedule); + } + fieldValuesMap = fieldMapBuilder.build(); + } + } + } + return fieldValuesMap; + } + + public String getFieldValue(String fieldName) { + return getFieldValuesMap().get(fieldName); + } + + @Override + public String toString() { + return 
PROJECT_INSTANCE_DATABASE_SCHEDULE.instantiate( + "project", project, "instance", instance, "database", database, "schedule", schedule); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (o != null && getClass() == o.getClass()) { + BackupScheduleName that = ((BackupScheduleName) o); + return Objects.equals(this.project, that.project) + && Objects.equals(this.instance, that.instance) + && Objects.equals(this.database, that.database) + && Objects.equals(this.schedule, that.schedule); + } + return false; + } + + @Override + public int hashCode() { + int h = 1; + h *= 1000003; + h ^= Objects.hashCode(project); + h *= 1000003; + h ^= Objects.hashCode(instance); + h *= 1000003; + h ^= Objects.hashCode(database); + h *= 1000003; + h ^= Objects.hashCode(schedule); + return h; + } + + /** + * Builder for + * projects/{project}/instances/{instance}/databases/{database}/backupSchedules/{schedule}. + */ + public static class Builder { + private String project; + private String instance; + private String database; + private String schedule; + + protected Builder() {} + + public String getProject() { + return project; + } + + public String getInstance() { + return instance; + } + + public String getDatabase() { + return database; + } + + public String getSchedule() { + return schedule; + } + + public Builder setProject(String project) { + this.project = project; + return this; + } + + public Builder setInstance(String instance) { + this.instance = instance; + return this; + } + + public Builder setDatabase(String database) { + this.database = database; + return this; + } + + public Builder setSchedule(String schedule) { + this.schedule = schedule; + return this; + } + + private Builder(BackupScheduleName backupScheduleName) { + this.project = backupScheduleName.project; + this.instance = backupScheduleName.instance; + this.database = backupScheduleName.database; + this.schedule = backupScheduleName.schedule; + } + + public 
BackupScheduleName build() { + return new BackupScheduleName(this); + } + } +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleOrBuilder.java new file mode 100644 index 00000000000..c4320e1dcfd --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleOrBuilder.java @@ -0,0 +1,326 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +public interface BackupScheduleOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.BackupSchedule) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Identifier. Output only for the
    +   * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation.
    +   * Required for the
    +   * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +   * operation. A globally unique identifier for the backup schedule which
    +   * cannot be changed. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +   * The final segment of the name must be between 2 and 60 characters in
    +   * length.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
    +   * Identifier. Output only for the
    +   * [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation.
    +   * Required for the
    +   * [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
    +   * operation. A globally unique identifier for the backup schedule which
    +   * cannot be changed. Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
    +   * The final segment of the name must be between 2 and 60 characters in
    +   * length.
    +   * 
    + * + * string name = 1 [(.google.api.field_behavior) = IDENTIFIER]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * Optional. The schedule specification based on which the backup creations
    +   * are triggered.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the spec field is set. + */ + boolean hasSpec(); + /** + * + * + *
    +   * Optional. The schedule specification based on which the backup creations
    +   * are triggered.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The spec. + */ + com.google.spanner.admin.database.v1.BackupScheduleSpec getSpec(); + /** + * + * + *
    +   * Optional. The schedule specification based on which the backup creations
    +   * are triggered.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupScheduleSpec spec = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.database.v1.BackupScheduleSpecOrBuilder getSpecOrBuilder(); + + /** + * + * + *
    +   * Optional. The retention duration of a backup that must be at least 6 hours
    +   * and at most 366 days. The backup is eligible to be automatically deleted
    +   * once the retention period has elapsed.
    +   * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the retentionDuration field is set. + */ + boolean hasRetentionDuration(); + /** + * + * + *
    +   * Optional. The retention duration of a backup that must be at least 6 hours
    +   * and at most 366 days. The backup is eligible to be automatically deleted
    +   * once the retention period has elapsed.
    +   * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The retentionDuration. + */ + com.google.protobuf.Duration getRetentionDuration(); + /** + * + * + *
    +   * Optional. The retention duration of a backup that must be at least 6 hours
    +   * and at most 366 days. The backup is eligible to be automatically deleted
    +   * once the retention period has elapsed.
    +   * 
    + * + * + * .google.protobuf.Duration retention_duration = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.DurationOrBuilder getRetentionDurationOrBuilder(); + + /** + * + * + *
    +   * Optional. The encryption configuration that will be used to encrypt the
    +   * backup. If this field is not specified, the backup will use the same
    +   * encryption configuration as the database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the encryptionConfig field is set. + */ + boolean hasEncryptionConfig(); + /** + * + * + *
    +   * Optional. The encryption configuration that will be used to encrypt the
    +   * backup. If this field is not specified, the backup will use the same
    +   * encryption configuration as the database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The encryptionConfig. + */ + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig getEncryptionConfig(); + /** + * + * + *
    +   * Optional. The encryption configuration that will be used to encrypt the
    +   * backup. If this field is not specified, the backup will use the same
    +   * encryption configuration as the database.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.CreateBackupEncryptionConfig encryption_config = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.database.v1.CreateBackupEncryptionConfigOrBuilder + getEncryptionConfigOrBuilder(); + + /** + * + * + *
    +   * The schedule creates only full backups.
    +   * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + * + * @return Whether the fullBackupSpec field is set. + */ + boolean hasFullBackupSpec(); + /** + * + * + *
    +   * The schedule creates only full backups.
    +   * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + * + * @return The fullBackupSpec. + */ + com.google.spanner.admin.database.v1.FullBackupSpec getFullBackupSpec(); + /** + * + * + *
    +   * The schedule creates only full backups.
    +   * 
    + * + * .google.spanner.admin.database.v1.FullBackupSpec full_backup_spec = 7; + */ + com.google.spanner.admin.database.v1.FullBackupSpecOrBuilder getFullBackupSpecOrBuilder(); + + /** + * + * + *
    +   * The schedule creates incremental backup chains.
    +   * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + * + * @return Whether the incrementalBackupSpec field is set. + */ + boolean hasIncrementalBackupSpec(); + /** + * + * + *
    +   * The schedule creates incremental backup chains.
    +   * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + * + * @return The incrementalBackupSpec. + */ + com.google.spanner.admin.database.v1.IncrementalBackupSpec getIncrementalBackupSpec(); + /** + * + * + *
    +   * The schedule creates incremental backup chains.
    +   * 
    + * + * .google.spanner.admin.database.v1.IncrementalBackupSpec incremental_backup_spec = 8; + * + */ + com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder + getIncrementalBackupSpecOrBuilder(); + + /** + * + * + *
    +   * Output only. The timestamp at which the schedule was last updated.
    +   * If the schedule has never been updated, this field contains the timestamp
    +   * when the schedule was first created.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the updateTime field is set. + */ + boolean hasUpdateTime(); + /** + * + * + *
    +   * Output only. The timestamp at which the schedule was last updated.
    +   * If the schedule has never been updated, this field contains the timestamp
    +   * when the schedule was first created.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The updateTime. + */ + com.google.protobuf.Timestamp getUpdateTime(); + /** + * + * + *
    +   * Output only. The timestamp at which the schedule was last updated.
    +   * If the schedule has never been updated, this field contains the timestamp
    +   * when the schedule was first created.
    +   * 
    + * + * .google.protobuf.Timestamp update_time = 9 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder(); + + com.google.spanner.admin.database.v1.BackupSchedule.BackupTypeSpecCase getBackupTypeSpecCase(); +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleProto.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleProto.java new file mode 100644 index 00000000000..bd19430f813 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleProto.java @@ -0,0 +1,241 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +public final class BackupScheduleProto { + private BackupScheduleProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_BackupSchedule_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_BackupSchedule_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_CrontabSpec_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_CrontabSpec_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n6google/spanner/admin/database/v1/backu" + + "p_schedule.proto\022 google.spanner.admin.d" + + "atabase.v1\032\037google/api/field_behavior.pr" + + "oto\032\031google/api/resource.proto\032\036google/p" + + "rotobuf/duration.proto\032 google/protobuf/" + + "field_mask.proto\032\037google/protobuf/timest" + + "amp.proto\032-google/spanner/admin/database" + + 
"/v1/backup.proto\"i\n\022BackupScheduleSpec\022B" + + "\n\tcron_spec\030\001 \001(\0132-.google.spanner.admin" + + ".database.v1.CrontabSpecH\000B\017\n\rschedule_s" + + "pec\"\244\005\n\016BackupSchedule\022\021\n\004name\030\001 \001(\tB\003\340A" + + "\010\022G\n\004spec\030\006 \001(\01324.google.spanner.admin.d" + + "atabase.v1.BackupScheduleSpecB\003\340A\001\022:\n\022re" + + "tention_duration\030\003 \001(\0132\031.google.protobuf" + + ".DurationB\003\340A\001\022^\n\021encryption_config\030\004 \001(" + + "\0132>.google.spanner.admin.database.v1.Cre" + + "ateBackupEncryptionConfigB\003\340A\001\022L\n\020full_b" + + "ackup_spec\030\007 \001(\01320.google.spanner.admin." + + "database.v1.FullBackupSpecH\000\022Z\n\027incremen" + + "tal_backup_spec\030\010 \001(\01327.google.spanner.a" + + "dmin.database.v1.IncrementalBackupSpecH\000" + + "\0224\n\013update_time\030\t \001(\0132\032.google.protobuf." + + "TimestampB\003\340A\003:\245\001\352A\241\001\n%spanner.googleapi" + + "s.com/BackupSchedule\022Wprojects/{project}" + + "/instances/{instance}/databases/{databas" + + "e}/backupSchedules/{schedule}*\017backupSch" + + "edules2\016backupScheduleB\022\n\020backup_type_sp" + + "ec\"q\n\013CrontabSpec\022\021\n\004text\030\001 \001(\tB\003\340A\002\022\026\n\t" + + "time_zone\030\002 \001(\tB\003\340A\003\0227\n\017creation_window\030" + + "\003 \001(\0132\031.google.protobuf.DurationB\003\340A\003\"\307\001" + + "\n\033CreateBackupScheduleRequest\0227\n\006parent\030" + + "\001 \001(\tB\'\340A\002\372A!\n\037spanner.googleapis.com/Da" + + "tabase\022\037\n\022backup_schedule_id\030\002 \001(\tB\003\340A\002\022" + + "N\n\017backup_schedule\030\003 \001(\01320.google.spanne" + + "r.admin.database.v1.BackupScheduleB\003\340A\002\"" + + "W\n\030GetBackupScheduleRequest\022;\n\004name\030\001 \001(" + + "\tB-\340A\002\372A\'\n%spanner.googleapis.com/Backup" + + "Schedule\"Z\n\033DeleteBackupScheduleRequest\022" + + ";\n\004name\030\001 
\001(\tB-\340A\002\372A\'\n%spanner.googleapi" + + "s.com/BackupSchedule\"\206\001\n\032ListBackupSched" + + "ulesRequest\0227\n\006parent\030\001 \001(\tB\'\340A\002\372A!\n\037spa" + + "nner.googleapis.com/Database\022\026\n\tpage_siz" + + "e\030\002 \001(\005B\003\340A\001\022\027\n\npage_token\030\004 \001(\tB\003\340A\001\"\202\001" + + "\n\033ListBackupSchedulesResponse\022J\n\020backup_" + + "schedules\030\001 \003(\01320.google.spanner.admin.d" + + "atabase.v1.BackupSchedule\022\027\n\017next_page_t" + + "oken\030\002 \001(\t\"\243\001\n\033UpdateBackupScheduleReque" + + "st\022N\n\017backup_schedule\030\001 \001(\01320.google.spa" + + "nner.admin.database.v1.BackupScheduleB\003\340" + + "A\002\0224\n\013update_mask\030\002 \001(\0132\032.google.protobu" + + "f.FieldMaskB\003\340A\002B\205\002\n$com.google.spanner." + + "admin.database.v1B\023BackupScheduleProtoP\001" + + "ZFcloud.google.com/go/spanner/admin/data" + + "base/apiv1/databasepb;databasepb\252\002&Googl" + + "e.Cloud.Spanner.Admin.Database.V1\312\002&Goog" + + "le\\Cloud\\Spanner\\Admin\\Database\\V1\352\002+Goo" + + "gle::Cloud::Spanner::Admin::Database::V1" + + "b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.protobuf.DurationProto.getDescriptor(), + com.google.protobuf.FieldMaskProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + com.google.spanner.admin.database.v1.BackupProto.getDescriptor(), + }); + internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + 
internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_descriptor, + new java.lang.String[] { + "CronSpec", "ScheduleSpec", + }); + internal_static_google_spanner_admin_database_v1_BackupSchedule_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_spanner_admin_database_v1_BackupSchedule_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_BackupSchedule_descriptor, + new java.lang.String[] { + "Name", + "Spec", + "RetentionDuration", + "EncryptionConfig", + "FullBackupSpec", + "IncrementalBackupSpec", + "UpdateTime", + "BackupTypeSpec", + }); + internal_static_google_spanner_admin_database_v1_CrontabSpec_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_spanner_admin_database_v1_CrontabSpec_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_CrontabSpec_descriptor, + new java.lang.String[] { + "Text", "TimeZone", "CreationWindow", + }); + internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_descriptor, + new java.lang.String[] { + "Parent", "BackupScheduleId", "BackupSchedule", + }); + internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_descriptor, + new java.lang.String[] { + "Name", + }); + 
internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_descriptor, + new java.lang.String[] { + "Parent", "PageSize", "PageToken", + }); + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_descriptor, + new java.lang.String[] { + "BackupSchedules", "NextPageToken", + }); + internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_descriptor, + new java.lang.String[] { + "BackupSchedule", "UpdateMask", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + 
registry.add(com.google.api.ResourceProto.resource); + registry.add(com.google.api.ResourceProto.resourceReference); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.protobuf.DurationProto.getDescriptor(); + com.google.protobuf.FieldMaskProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.spanner.admin.database.v1.BackupProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleSpec.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleSpec.java new file mode 100644 index 00000000000..8b09abda5f2 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleSpec.java @@ -0,0 +1,820 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * Defines specifications of the backup schedule.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.BackupScheduleSpec} + */ +public final class BackupScheduleSpec extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.BackupScheduleSpec) + BackupScheduleSpecOrBuilder { + private static final long serialVersionUID = 0L; + // Use BackupScheduleSpec.newBuilder() to construct. + private BackupScheduleSpec(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BackupScheduleSpec() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BackupScheduleSpec(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.BackupScheduleSpec.class, + com.google.spanner.admin.database.v1.BackupScheduleSpec.Builder.class); + } + + private int scheduleSpecCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object scheduleSpec_; + + public enum ScheduleSpecCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + CRON_SPEC(1), + SCHEDULESPEC_NOT_SET(0); + private final int value; + + private ScheduleSpecCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. 
+ * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ScheduleSpecCase valueOf(int value) { + return forNumber(value); + } + + public static ScheduleSpecCase forNumber(int value) { + switch (value) { + case 1: + return CRON_SPEC; + case 0: + return SCHEDULESPEC_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ScheduleSpecCase getScheduleSpecCase() { + return ScheduleSpecCase.forNumber(scheduleSpecCase_); + } + + public static final int CRON_SPEC_FIELD_NUMBER = 1; + /** + * + * + *
    +   * Cron style schedule specification.
    +   * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + * + * @return Whether the cronSpec field is set. + */ + @java.lang.Override + public boolean hasCronSpec() { + return scheduleSpecCase_ == 1; + } + /** + * + * + *
    +   * Cron style schedule specification.
    +   * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + * + * @return The cronSpec. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpec getCronSpec() { + if (scheduleSpecCase_ == 1) { + return (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_; + } + return com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance(); + } + /** + * + * + *
    +   * Cron style schedule specification.
    +   * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpecOrBuilder getCronSpecOrBuilder() { + if (scheduleSpecCase_ == 1) { + return (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_; + } + return com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (scheduleSpecCase_ == 1) { + output.writeMessage(1, (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (scheduleSpecCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.BackupScheduleSpec)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.BackupScheduleSpec other = + (com.google.spanner.admin.database.v1.BackupScheduleSpec) obj; + + if (!getScheduleSpecCase().equals(other.getScheduleSpecCase())) return false; + switch (scheduleSpecCase_) { + case 1: + if (!getCronSpec().equals(other.getCronSpec())) return false; + break; + case 0: + default: + } + if 
(!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (scheduleSpecCase_) { + case 1: + hash = (37 * hash) + CRON_SPEC_FIELD_NUMBER; + hash = (53 * hash) + getCronSpec().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + 
com.google.spanner.admin.database.v1.BackupScheduleSpec prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * Defines specifications of the backup schedule.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.BackupScheduleSpec} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.BackupScheduleSpec) + com.google.spanner.admin.database.v1.BackupScheduleSpecOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.BackupScheduleSpec.class, + com.google.spanner.admin.database.v1.BackupScheduleSpec.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.BackupScheduleSpec.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (cronSpecBuilder_ != null) { + cronSpecBuilder_.clear(); + } + scheduleSpecCase_ = 0; + scheduleSpec_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_BackupScheduleSpec_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleSpec getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.BackupScheduleSpec.getDefaultInstance(); + } + + @java.lang.Override + 
public com.google.spanner.admin.database.v1.BackupScheduleSpec build() { + com.google.spanner.admin.database.v1.BackupScheduleSpec result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleSpec buildPartial() { + com.google.spanner.admin.database.v1.BackupScheduleSpec result = + new com.google.spanner.admin.database.v1.BackupScheduleSpec(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.BackupScheduleSpec result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs( + com.google.spanner.admin.database.v1.BackupScheduleSpec result) { + result.scheduleSpecCase_ = scheduleSpecCase_; + result.scheduleSpec_ = this.scheduleSpec_; + if (scheduleSpecCase_ == 1 && cronSpecBuilder_ != null) { + result.scheduleSpec_ = cronSpecBuilder_.build(); + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return 
super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.BackupScheduleSpec) { + return mergeFrom((com.google.spanner.admin.database.v1.BackupScheduleSpec) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.BackupScheduleSpec other) { + if (other == com.google.spanner.admin.database.v1.BackupScheduleSpec.getDefaultInstance()) + return this; + switch (other.getScheduleSpecCase()) { + case CRON_SPEC: + { + mergeCronSpec(other.getCronSpec()); + break; + } + case SCHEDULESPEC_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getCronSpecFieldBuilder().getBuilder(), extensionRegistry); + scheduleSpecCase_ = 1; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int scheduleSpecCase_ = 0; + private java.lang.Object scheduleSpec_; + + public ScheduleSpecCase getScheduleSpecCase() { + return ScheduleSpecCase.forNumber(scheduleSpecCase_); + } + + public 
Builder clearScheduleSpec() { + scheduleSpecCase_ = 0; + scheduleSpec_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.CrontabSpec, + com.google.spanner.admin.database.v1.CrontabSpec.Builder, + com.google.spanner.admin.database.v1.CrontabSpecOrBuilder> + cronSpecBuilder_; + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + * + * @return Whether the cronSpec field is set. + */ + @java.lang.Override + public boolean hasCronSpec() { + return scheduleSpecCase_ == 1; + } + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + * + * @return The cronSpec. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpec getCronSpec() { + if (cronSpecBuilder_ == null) { + if (scheduleSpecCase_ == 1) { + return (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_; + } + return com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance(); + } else { + if (scheduleSpecCase_ == 1) { + return cronSpecBuilder_.getMessage(); + } + return com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance(); + } + } + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + public Builder setCronSpec(com.google.spanner.admin.database.v1.CrontabSpec value) { + if (cronSpecBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + scheduleSpec_ = value; + onChanged(); + } else { + cronSpecBuilder_.setMessage(value); + } + scheduleSpecCase_ = 1; + return this; + } + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + public Builder setCronSpec( + com.google.spanner.admin.database.v1.CrontabSpec.Builder builderForValue) { + if (cronSpecBuilder_ == null) { + scheduleSpec_ = builderForValue.build(); + onChanged(); + } else { + cronSpecBuilder_.setMessage(builderForValue.build()); + } + scheduleSpecCase_ = 1; + return this; + } + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + public Builder mergeCronSpec(com.google.spanner.admin.database.v1.CrontabSpec value) { + if (cronSpecBuilder_ == null) { + if (scheduleSpecCase_ == 1 + && scheduleSpec_ + != com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance()) { + scheduleSpec_ = + com.google.spanner.admin.database.v1.CrontabSpec.newBuilder( + (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_) + .mergeFrom(value) + .buildPartial(); + } else { + scheduleSpec_ = value; + } + onChanged(); + } else { + if (scheduleSpecCase_ == 1) { + cronSpecBuilder_.mergeFrom(value); + } else { + cronSpecBuilder_.setMessage(value); + } + } + scheduleSpecCase_ = 1; + return this; + } + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + public Builder clearCronSpec() { + if (cronSpecBuilder_ == null) { + if (scheduleSpecCase_ == 1) { + scheduleSpecCase_ = 0; + scheduleSpec_ = null; + onChanged(); + } + } else { + if (scheduleSpecCase_ == 1) { + scheduleSpecCase_ = 0; + scheduleSpec_ = null; + } + cronSpecBuilder_.clear(); + } + return this; + } + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + public com.google.spanner.admin.database.v1.CrontabSpec.Builder getCronSpecBuilder() { + return getCronSpecFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpecOrBuilder getCronSpecOrBuilder() { + if ((scheduleSpecCase_ == 1) && (cronSpecBuilder_ != null)) { + return cronSpecBuilder_.getMessageOrBuilder(); + } else { + if (scheduleSpecCase_ == 1) { + return (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_; + } + return com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance(); + } + } + /** + * + * + *
    +     * Cron style schedule specification.
    +     * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.CrontabSpec, + com.google.spanner.admin.database.v1.CrontabSpec.Builder, + com.google.spanner.admin.database.v1.CrontabSpecOrBuilder> + getCronSpecFieldBuilder() { + if (cronSpecBuilder_ == null) { + if (!(scheduleSpecCase_ == 1)) { + scheduleSpec_ = com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance(); + } + cronSpecBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.CrontabSpec, + com.google.spanner.admin.database.v1.CrontabSpec.Builder, + com.google.spanner.admin.database.v1.CrontabSpecOrBuilder>( + (com.google.spanner.admin.database.v1.CrontabSpec) scheduleSpec_, + getParentForChildren(), + isClean()); + scheduleSpec_ = null; + } + scheduleSpecCase_ = 1; + onChanged(); + return cronSpecBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.BackupScheduleSpec) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.BackupScheduleSpec) + private static final com.google.spanner.admin.database.v1.BackupScheduleSpec DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.BackupScheduleSpec(); + } + + public static com.google.spanner.admin.database.v1.BackupScheduleSpec getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BackupScheduleSpec 
parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleSpec getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleSpecOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleSpecOrBuilder.java new file mode 100644 index 00000000000..fbab5446011 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/BackupScheduleSpecOrBuilder.java @@ -0,0 +1,63 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +public interface BackupScheduleSpecOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.BackupScheduleSpec) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Cron style schedule specification.
    +   * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + * + * @return Whether the cronSpec field is set. + */ + boolean hasCronSpec(); + /** + * + * + *
    +   * Cron style schedule specification.
    +   * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + * + * @return The cronSpec. + */ + com.google.spanner.admin.database.v1.CrontabSpec getCronSpec(); + /** + * + * + *
    +   * Cron style schedule specification.
    +   * 
    + * + * .google.spanner.admin.database.v1.CrontabSpec cron_spec = 1; + */ + com.google.spanner.admin.database.v1.CrontabSpecOrBuilder getCronSpecOrBuilder(); + + com.google.spanner.admin.database.v1.BackupScheduleSpec.ScheduleSpecCase getScheduleSpecCase(); +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CommonProto.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CommonProto.java index e2b2ad93d01..762b30ba4f7 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CommonProto.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CommonProto.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/common.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public final class CommonProto { @@ -57,33 +57,35 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\213\001\n\021OperationProgress\022\030\n\020progress_percen" + "t\030\001 \001(\005\022.\n\nstart_time\030\002 \001(\0132\032.google.pro" + "tobuf.Timestamp\022,\n\010end_time\030\003 \001(\0132\032.goog" - + "le.protobuf.Timestamp\"P\n\020EncryptionConfi" - + "g\022<\n\014kms_key_name\030\002 \001(\tB&\372A#\n!cloudkms.g" - + "oogleapis.com/CryptoKey\"\302\002\n\016EncryptionIn" - + "fo\022S\n\017encryption_type\030\003 \001(\01625.google.spa" - + "nner.admin.database.v1.EncryptionInfo.Ty" - + "peB\003\340A\003\0222\n\021encryption_status\030\004 \001(\0132\022.goo" - + "gle.rpc.StatusB\003\340A\003\022I\n\017kms_key_version\030\002" - + " \001(\tB0\340A\003\372A*\n(cloudkms.googleapis.com/Cr" - + "yptoKeyVersion\"\\\n\004Type\022\024\n\020TYPE_UNSPECIFI" - + 
"ED\020\000\022\035\n\031GOOGLE_DEFAULT_ENCRYPTION\020\001\022\037\n\033C" - + "USTOMER_MANAGED_ENCRYPTION\020\002*\\\n\017Database" - + "Dialect\022 \n\034DATABASE_DIALECT_UNSPECIFIED\020" - + "\000\022\027\n\023GOOGLE_STANDARD_SQL\020\001\022\016\n\nPOSTGRESQL" - + "\020\002B\242\004\n$com.google.spanner.admin.database" - + ".v1B\013CommonProtoP\001ZFcloud.google.com/go/" - + "spanner/admin/database/apiv1/databasepb;" - + "databasepb\252\002&Google.Cloud.Spanner.Admin." - + "Database.V1\312\002&Google\\Cloud\\Spanner\\Admin" - + "\\Database\\V1\352\002+Google::Cloud::Spanner::A" - + "dmin::Database::V1\352Ax\n!cloudkms.googleap" - + "is.com/CryptoKey\022Sprojects/{project}/loc" - + "ations/{location}/keyRings/{key_ring}/cr" - + "yptoKeys/{crypto_key}\352A\246\001\n(cloudkms.goog" - + "leapis.com/CryptoKeyVersion\022zprojects/{p" - + "roject}/locations/{location}/keyRings/{k" - + "ey_ring}/cryptoKeys/{crypto_key}/cryptoK" - + "eyVersions/{crypto_key_version}b\006proto3" + + "le.protobuf.Timestamp\"\217\001\n\020EncryptionConf" + + "ig\022<\n\014kms_key_name\030\002 \001(\tB&\372A#\n!cloudkms." 
+ + "googleapis.com/CryptoKey\022=\n\rkms_key_name" + + "s\030\003 \003(\tB&\372A#\n!cloudkms.googleapis.com/Cr" + + "yptoKey\"\302\002\n\016EncryptionInfo\022S\n\017encryption" + + "_type\030\003 \001(\01625.google.spanner.admin.datab" + + "ase.v1.EncryptionInfo.TypeB\003\340A\003\0222\n\021encry" + + "ption_status\030\004 \001(\0132\022.google.rpc.StatusB\003" + + "\340A\003\022I\n\017kms_key_version\030\002 \001(\tB0\340A\003\372A*\n(cl" + + "oudkms.googleapis.com/CryptoKeyVersion\"\\" + + "\n\004Type\022\024\n\020TYPE_UNSPECIFIED\020\000\022\035\n\031GOOGLE_D" + + "EFAULT_ENCRYPTION\020\001\022\037\n\033CUSTOMER_MANAGED_" + + "ENCRYPTION\020\002*\\\n\017DatabaseDialect\022 \n\034DATAB" + + "ASE_DIALECT_UNSPECIFIED\020\000\022\027\n\023GOOGLE_STAN" + + "DARD_SQL\020\001\022\016\n\nPOSTGRESQL\020\002B\242\004\n$com.googl" + + "e.spanner.admin.database.v1B\013CommonProto" + + "P\001ZFcloud.google.com/go/spanner/admin/da" + + "tabase/apiv1/databasepb;databasepb\252\002&Goo" + + "gle.Cloud.Spanner.Admin.Database.V1\312\002&Go" + + "ogle\\Cloud\\Spanner\\Admin\\Database\\V1\352\002+G" + + "oogle::Cloud::Spanner::Admin::Database::" + + "V1\352Ax\n!cloudkms.googleapis.com/CryptoKey" + + "\022Sprojects/{project}/locations/{location" + + "}/keyRings/{key_ring}/cryptoKeys/{crypto" + + "_key}\352A\246\001\n(cloudkms.googleapis.com/Crypt" + + "oKeyVersion\022zprojects/{project}/location" + + "s/{location}/keyRings/{key_ring}/cryptoK" + + "eys/{crypto_key}/cryptoKeyVersions/{cryp" + + "to_key_version}b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -108,7 +110,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_database_v1_EncryptionConfig_descriptor, new java.lang.String[] { - "KmsKeyName", + "KmsKeyName", "KmsKeyNames", }); 
internal_static_google_spanner_admin_database_v1_EncryptionInfo_descriptor = getDescriptor().getMessageTypes().get(2); diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupEncryptionConfig.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupEncryptionConfig.java index 7f2f0db1d12..efe179261d9 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupEncryptionConfig.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupEncryptionConfig.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -41,6 +41,7 @@ private CopyBackupEncryptionConfig(com.google.protobuf.GeneratedMessageV3.Builde private CopyBackupEncryptionConfig() { encryptionType_ = 0; kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); } @java.lang.Override @@ -89,10 +90,13 @@ public enum EncryptionType implements com.google.protobuf.ProtocolMessageEnum { * * *
    -     * This is the default option for [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    -     * when [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig] is not specified.
    -     * For example, if the source backup is using `Customer_Managed_Encryption`,
    -     * the backup will be using the same Cloud KMS key as the source backup.
    +     * This is the default option for
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * when
    +     * [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig]
    +     * is not specified. For example, if the source backup is using
    +     * `Customer_Managed_Encryption`, the backup will be using the same Cloud
    +     * KMS key as the source backup.
          * 
    * * USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1; @@ -112,8 +116,8 @@ public enum EncryptionType implements com.google.protobuf.ProtocolMessageEnum { * * *
    -     * Use customer managed encryption. If specified, `kms_key_name`
    -     * must contain a valid Cloud KMS key.
    +     * Use customer managed encryption. If specified, either `kms_key_name` or
    +     * `kms_key_names` must contain valid Cloud KMS key(s).
          * 
    * * CUSTOMER_MANAGED_ENCRYPTION = 3; @@ -136,10 +140,13 @@ public enum EncryptionType implements com.google.protobuf.ProtocolMessageEnum { * * *
    -     * This is the default option for [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    -     * when [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig] is not specified.
    -     * For example, if the source backup is using `Customer_Managed_Encryption`,
    -     * the backup will be using the same Cloud KMS key as the source backup.
    +     * This is the default option for
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * when
    +     * [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig]
    +     * is not specified. For example, if the source backup is using
    +     * `Customer_Managed_Encryption`, the backup will be using the same Cloud
    +     * KMS key as the source backup.
          * 
    * * USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1; @@ -159,8 +166,8 @@ public enum EncryptionType implements com.google.protobuf.ProtocolMessageEnum { * * *
    -     * Use customer managed encryption. If specified, `kms_key_name`
    -     * must contain a valid Cloud KMS key.
    +     * Use customer managed encryption. If specified, either `kms_key_name` or
    +     * `kms_key_names` must contain valid Cloud KMS key(s).
          * 
    * * CUSTOMER_MANAGED_ENCRYPTION = 3; @@ -308,8 +315,8 @@ public int getEncryptionTypeValue() { *
        * Optional. The Cloud KMS key that will be used to protect the backup.
        * This field should be set only when
    -   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] is
    -   * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
        * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
        * 
    * @@ -337,8 +344,8 @@ public java.lang.String getKmsKeyName() { *
        * Optional. The Cloud KMS key that will be used to protect the backup.
        * This field should be set only when
    -   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] is
    -   * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
        * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
        * 
    * @@ -361,6 +368,138 @@ public com.google.protobuf.ByteString getKmsKeyNameBytes() { } } + public static final int KMS_KEY_NAMES_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + return kmsKeyNames_; + } + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -384,6 +523,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(kmsKeyName_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, kmsKeyName_); } + for (int i = 0; i < kmsKeyNames_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, kmsKeyNames_.getRaw(i)); + } getUnknownFields().writeTo(output); } @@ -402,6 +544,14 @@ public int getSerializedSize() { if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(kmsKeyName_)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, kmsKeyName_); } + { + int dataSize = 0; + for (int i = 0; i < kmsKeyNames_.size(); i++) { + dataSize += computeStringSizeNoTag(kmsKeyNames_.getRaw(i)); + } + size += dataSize; + size += 1 * getKmsKeyNamesList().size(); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -420,6 +570,7 @@ public boolean equals(final java.lang.Object obj) { if (encryptionType_ != other.encryptionType_) return false; if (!getKmsKeyName().equals(other.getKmsKeyName())) return false; + if (!getKmsKeyNamesList().equals(other.getKmsKeyNamesList())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -435,6 +586,10 @@ public int hashCode() { hash = (53 * hash) + encryptionType_; hash = (37 * hash) + KMS_KEY_NAME_FIELD_NUMBER; hash = (53 * hash) + getKmsKeyName().hashCode(); + if (getKmsKeyNamesCount() > 0) { + hash = (37 * hash) + 
KMS_KEY_NAMES_FIELD_NUMBER; + hash = (53 * hash) + getKmsKeyNamesList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -577,6 +732,7 @@ public Builder clear() { bitField0_ = 0; encryptionType_ = 0; kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); return this; } @@ -621,6 +777,10 @@ private void buildPartial0( if (((from_bitField0_ & 0x00000002) != 0)) { result.kmsKeyName_ = kmsKeyName_; } + if (((from_bitField0_ & 0x00000004) != 0)) { + kmsKeyNames_.makeImmutable(); + result.kmsKeyNames_ = kmsKeyNames_; + } } @java.lang.Override @@ -679,6 +839,16 @@ public Builder mergeFrom( bitField0_ |= 0x00000002; onChanged(); } + if (!other.kmsKeyNames_.isEmpty()) { + if (kmsKeyNames_.isEmpty()) { + kmsKeyNames_ = other.kmsKeyNames_; + bitField0_ |= 0x00000004; + } else { + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.addAll(other.kmsKeyNames_); + } + onChanged(); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -717,6 +887,13 @@ public Builder mergeFrom( bitField0_ |= 0x00000002; break; } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(s); + break; + } // case 26 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -849,8 +1026,8 @@ public Builder clearEncryptionType() { *
          * Optional. The Cloud KMS key that will be used to protect the backup.
          * This field should be set only when
    -     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] is
    -     * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
          * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
          * 
    * @@ -877,8 +1054,8 @@ public java.lang.String getKmsKeyName() { *
          * Optional. The Cloud KMS key that will be used to protect the backup.
          * This field should be set only when
    -     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] is
    -     * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
          * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
          * 
    * @@ -905,8 +1082,8 @@ public com.google.protobuf.ByteString getKmsKeyNameBytes() { *
          * Optional. The Cloud KMS key that will be used to protect the backup.
          * This field should be set only when
    -     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] is
    -     * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
          * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
          * 
    * @@ -932,8 +1109,8 @@ public Builder setKmsKeyName(java.lang.String value) { *
          * Optional. The Cloud KMS key that will be used to protect the backup.
          * This field should be set only when
    -     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] is
    -     * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
          * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
          * 
    * @@ -955,8 +1132,8 @@ public Builder clearKmsKeyName() { *
          * Optional. The Cloud KMS key that will be used to protect the backup.
          * This field should be set only when
    -     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] is
    -     * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
          * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
          * 
    * @@ -978,6 +1155,333 @@ public Builder setKmsKeyNameBytes(com.google.protobuf.ByteString value) { return this; } + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureKmsKeyNamesIsMutable() { + if (!kmsKeyNames_.isModifiable()) { + kmsKeyNames_ = new com.google.protobuf.LazyStringArrayList(kmsKeyNames_); + } + bitField0_ |= 0x00000004; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + kmsKeyNames_.makeImmutable(); + return kmsKeyNames_; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index to set the value at. + * @param value The kmsKeyNames to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyNames(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNames(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param values The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addAllKmsKeyNames(java.lang.Iterable values) { + ensureKmsKeyNamesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, kmsKeyNames_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearKmsKeyNames() { + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     * Kms keys specified can be in any order.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes of the kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNamesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupEncryptionConfigOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupEncryptionConfigOrBuilder.java index eb15f76cee4..8dd6c0ac244 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupEncryptionConfigOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupEncryptionConfigOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface CopyBackupEncryptionConfigOrBuilder @@ -60,8 +60,8 @@ public interface CopyBackupEncryptionConfigOrBuilder *
        * Optional. The Cloud KMS key that will be used to protect the backup.
        * This field should be set only when
    -   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] is
    -   * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
        * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
        * 
    * @@ -78,8 +78,8 @@ public interface CopyBackupEncryptionConfigOrBuilder *
        * Optional. The Cloud KMS key that will be used to protect the backup.
        * This field should be set only when
    -   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] is
    -   * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
        * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
        * 
    * @@ -90,4 +90,123 @@ public interface CopyBackupEncryptionConfigOrBuilder * @return The bytes for kmsKeyName. */ com.google.protobuf.ByteString getKmsKeyNameBytes(); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + java.util.List getKmsKeyNamesList(); + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + int getKmsKeyNamesCount(); + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + java.lang.String getKmsKeyNames(int index); + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   * Kms keys specified can be in any order.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + com.google.protobuf.ByteString getKmsKeyNamesBytes(int index); } diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupMetadata.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupMetadata.java index 11cc6e03107..bd91d0a8d44 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupMetadata.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupMetadata.java @@ -16,14 +16,14 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * Metadata type for the google.longrunning.Operation returned by
    + * Metadata type for the operation returned by
      * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
      * 
    * @@ -183,7 +183,8 @@ public com.google.protobuf.ByteString getSourceBackupBytes() { * *
        * The progress of the
    -   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation.
    +   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +   * operation.
        * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -199,7 +200,8 @@ public boolean hasProgress() { * *
        * The progress of the
    -   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation.
    +   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +   * operation.
        * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -217,7 +219,8 @@ public com.google.spanner.admin.database.v1.OperationProgress getProgress() { * *
        * The progress of the
    -   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation.
    +   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +   * operation.
        * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -509,7 +512,7 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * Metadata type for the google.longrunning.Operation returned by
    +   * Metadata type for the operation returned by
        * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
        * 
    * @@ -995,7 +998,8 @@ public Builder setSourceBackupBytes(com.google.protobuf.ByteString value) { * *
          * The progress of the
    -     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation.
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -1010,7 +1014,8 @@ public boolean hasProgress() { * *
          * The progress of the
    -     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation.
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -1031,7 +1036,8 @@ public com.google.spanner.admin.database.v1.OperationProgress getProgress() { * *
          * The progress of the
    -     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation.
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -1054,7 +1060,8 @@ public Builder setProgress(com.google.spanner.admin.database.v1.OperationProgres * *
          * The progress of the
    -     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation.
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -1075,7 +1082,8 @@ public Builder setProgress( * *
          * The progress of the
    -     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation.
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -1104,7 +1112,8 @@ public Builder mergeProgress(com.google.spanner.admin.database.v1.OperationProgr * *
          * The progress of the
    -     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation.
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -1124,7 +1133,8 @@ public Builder clearProgress() { * *
          * The progress of the
    -     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation.
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -1139,7 +1149,8 @@ public com.google.spanner.admin.database.v1.OperationProgress.Builder getProgres * *
          * The progress of the
    -     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation.
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -1158,7 +1169,8 @@ public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgre * *
          * The progress of the
    -     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation.
    +     * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupMetadataOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupMetadataOrBuilder.java index 80229fdd762..2f3dbd90c55 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupMetadataOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupMetadataOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface CopyBackupMetadataOrBuilder @@ -87,7 +87,8 @@ public interface CopyBackupMetadataOrBuilder * *
        * The progress of the
    -   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation.
    +   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +   * operation.
        * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -100,7 +101,8 @@ public interface CopyBackupMetadataOrBuilder * *
        * The progress of the
    -   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation.
    +   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +   * operation.
        * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -113,7 +115,8 @@ public interface CopyBackupMetadataOrBuilder * *
        * The progress of the
    -   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation.
    +   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
    +   * operation.
        * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupRequest.java index 7b95919bd1e..ba3850d1c52 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupRequest.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The request for [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
    + * The request for
    + * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.CopyBackupRequest} @@ -74,8 +75,8 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
    -   * Required. The name of the destination instance that will contain the backup copy.
    -   * Values are of the form: `projects/<project>/instances/<instance>`.
    +   * Required. The name of the destination instance that will contain the backup
    +   * copy. Values are of the form: `projects/<project>/instances/<instance>`.
        * 
    * * @@ -100,8 +101,8 @@ public java.lang.String getParent() { * * *
    -   * Required. The name of the destination instance that will contain the backup copy.
    -   * Values are of the form: `projects/<project>/instances/<instance>`.
    +   * Required. The name of the destination instance that will contain the backup
    +   * copy. Values are of the form: `projects/<project>/instances/<instance>`.
        * 
    * * @@ -310,11 +311,11 @@ public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { * * *
    -   * Optional. The encryption configuration used to encrypt the backup. If this field is
    -   * not specified, the backup will use the same
    -   * encryption configuration as the source backup by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] =
    -   * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the source backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
        * 
    * * @@ -331,11 +332,11 @@ public boolean hasEncryptionConfig() { * * *
    -   * Optional. The encryption configuration used to encrypt the backup. If this field is
    -   * not specified, the backup will use the same
    -   * encryption configuration as the source backup by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] =
    -   * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the source backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
        * 
    * * @@ -354,11 +355,11 @@ public com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig getEncryp * * *
    -   * Optional. The encryption configuration used to encrypt the backup. If this field is
    -   * not specified, the backup will use the same
    -   * encryption configuration as the source backup by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] =
    -   * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the source backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
        * 
    * * @@ -583,7 +584,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The request for [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
    +   * The request for
    +   * [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.CopyBackupRequest} @@ -847,8 +849,8 @@ public Builder mergeFrom( * * *
    -     * Required. The name of the destination instance that will contain the backup copy.
    -     * Values are of the form: `projects/<project>/instances/<instance>`.
    +     * Required. The name of the destination instance that will contain the backup
    +     * copy. Values are of the form: `projects/<project>/instances/<instance>`.
          * 
    * * @@ -872,8 +874,8 @@ public java.lang.String getParent() { * * *
    -     * Required. The name of the destination instance that will contain the backup copy.
    -     * Values are of the form: `projects/<project>/instances/<instance>`.
    +     * Required. The name of the destination instance that will contain the backup
    +     * copy. Values are of the form: `projects/<project>/instances/<instance>`.
          * 
    * * @@ -897,8 +899,8 @@ public com.google.protobuf.ByteString getParentBytes() { * * *
    -     * Required. The name of the destination instance that will contain the backup copy.
    -     * Values are of the form: `projects/<project>/instances/<instance>`.
    +     * Required. The name of the destination instance that will contain the backup
    +     * copy. Values are of the form: `projects/<project>/instances/<instance>`.
          * 
    * * @@ -921,8 +923,8 @@ public Builder setParent(java.lang.String value) { * * *
    -     * Required. The name of the destination instance that will contain the backup copy.
    -     * Values are of the form: `projects/<project>/instances/<instance>`.
    +     * Required. The name of the destination instance that will contain the backup
    +     * copy. Values are of the form: `projects/<project>/instances/<instance>`.
          * 
    * * @@ -941,8 +943,8 @@ public Builder clearParent() { * * *
    -     * Required. The name of the destination instance that will contain the backup copy.
    -     * Values are of the form: `projects/<project>/instances/<instance>`.
    +     * Required. The name of the destination instance that will contain the backup
    +     * copy. Values are of the form: `projects/<project>/instances/<instance>`.
          * 
    * * @@ -1460,11 +1462,11 @@ public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the source backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1480,11 +1482,11 @@ public boolean hasEncryptionConfig() { * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the source backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1506,11 +1508,11 @@ public com.google.spanner.admin.database.v1.CopyBackupEncryptionConfig getEncryp * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the source backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1535,11 +1537,11 @@ public Builder setEncryptionConfig( * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the source backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1561,11 +1563,11 @@ public Builder setEncryptionConfig( * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the source backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1597,11 +1599,11 @@ public Builder mergeEncryptionConfig( * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the source backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1622,11 +1624,11 @@ public Builder clearEncryptionConfig() { * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the source backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1643,11 +1645,11 @@ public Builder clearEncryptionConfig() { * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the source backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1668,11 +1670,11 @@ public Builder clearEncryptionConfig() { * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the source backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the source backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupRequestOrBuilder.java index 5481d8c278e..785c4487eef 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CopyBackupRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface CopyBackupRequestOrBuilder @@ -28,8 +28,8 @@ public interface CopyBackupRequestOrBuilder * * *
    -   * Required. The name of the destination instance that will contain the backup copy.
    -   * Values are of the form: `projects/<project>/instances/<instance>`.
    +   * Required. The name of the destination instance that will contain the backup
    +   * copy. Values are of the form: `projects/<project>/instances/<instance>`.
        * 
    * * @@ -43,8 +43,8 @@ public interface CopyBackupRequestOrBuilder * * *
    -   * Required. The name of the destination instance that will contain the backup copy.
    -   * Values are of the form: `projects/<project>/instances/<instance>`.
    +   * Required. The name of the destination instance that will contain the backup
    +   * copy. Values are of the form: `projects/<project>/instances/<instance>`.
        * 
    * * @@ -177,11 +177,11 @@ public interface CopyBackupRequestOrBuilder * * *
    -   * Optional. The encryption configuration used to encrypt the backup. If this field is
    -   * not specified, the backup will use the same
    -   * encryption configuration as the source backup by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] =
    -   * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the source backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
        * 
    * * @@ -195,11 +195,11 @@ public interface CopyBackupRequestOrBuilder * * *
    -   * Optional. The encryption configuration used to encrypt the backup. If this field is
    -   * not specified, the backup will use the same
    -   * encryption configuration as the source backup by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] =
    -   * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the source backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
        * 
    * * @@ -213,11 +213,11 @@ public interface CopyBackupRequestOrBuilder * * *
    -   * Optional. The encryption configuration used to encrypt the backup. If this field is
    -   * not specified, the backup will use the same
    -   * encryption configuration as the source backup by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] =
    -   * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the source backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
        * 
    * * diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupEncryptionConfig.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupEncryptionConfig.java index 9e09936de85..1b0104e81ec 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupEncryptionConfig.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupEncryptionConfig.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -41,6 +41,7 @@ private CreateBackupEncryptionConfig(com.google.protobuf.GeneratedMessageV3.Buil private CreateBackupEncryptionConfig() { encryptionType_ = 0; kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); } @java.lang.Override @@ -91,9 +92,10 @@ public enum EncryptionType implements com.google.protobuf.ProtocolMessageEnum { *
          * Use the same encryption configuration as the database. This is the
          * default option when
    -     * [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig] is empty.
    -     * For example, if the database is using `Customer_Managed_Encryption`, the
    -     * backup will be using the same Cloud KMS key as the database.
    +     * [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig]
    +     * is empty. For example, if the database is using
    +     * `Customer_Managed_Encryption`, the backup will be using the same Cloud
    +     * KMS key as the database.
          * 
    * * USE_DATABASE_ENCRYPTION = 1; @@ -139,9 +141,10 @@ public enum EncryptionType implements com.google.protobuf.ProtocolMessageEnum { *
          * Use the same encryption configuration as the database. This is the
          * default option when
    -     * [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig] is empty.
    -     * For example, if the database is using `Customer_Managed_Encryption`, the
    -     * backup will be using the same Cloud KMS key as the database.
    +     * [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig]
    +     * is empty. For example, if the database is using
    +     * `Customer_Managed_Encryption`, the backup will be using the same Cloud
    +     * KMS key as the database.
          * 
    * * USE_DATABASE_ENCRYPTION = 1; @@ -310,8 +313,8 @@ public int getEncryptionTypeValue() { *
        * Optional. The Cloud KMS key that will be used to protect the backup.
        * This field should be set only when
    -   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] is
    -   * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
        * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
        * 
    * @@ -339,8 +342,8 @@ public java.lang.String getKmsKeyName() { *
        * Optional. The Cloud KMS key that will be used to protect the backup.
        * This field should be set only when
    -   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] is
    -   * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
        * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
        * 
    * @@ -363,6 +366,134 @@ public com.google.protobuf.ByteString getKmsKeyNameBytes() { } } + public static final int KMS_KEY_NAMES_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + return kmsKeyNames_; + } + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -386,6 +517,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(kmsKeyName_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, kmsKeyName_); } + for (int i = 0; i < kmsKeyNames_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, kmsKeyNames_.getRaw(i)); + } getUnknownFields().writeTo(output); } @@ -404,6 +538,14 @@ public int getSerializedSize() { if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(kmsKeyName_)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, kmsKeyName_); } + { + int dataSize = 0; + for (int i = 0; i < kmsKeyNames_.size(); i++) { + dataSize += computeStringSizeNoTag(kmsKeyNames_.getRaw(i)); + } + size += dataSize; + size += 1 * getKmsKeyNamesList().size(); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -422,6 +564,7 @@ public boolean equals(final java.lang.Object obj) { if (encryptionType_ != other.encryptionType_) return false; if (!getKmsKeyName().equals(other.getKmsKeyName())) return false; + if (!getKmsKeyNamesList().equals(other.getKmsKeyNamesList())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -437,6 +580,10 @@ public int hashCode() { hash = (53 * hash) + encryptionType_; hash = (37 * hash) + KMS_KEY_NAME_FIELD_NUMBER; hash = (53 * hash) + getKmsKeyName().hashCode(); + if (getKmsKeyNamesCount() > 0) { + hash = (37 * hash) + 
KMS_KEY_NAMES_FIELD_NUMBER; + hash = (53 * hash) + getKmsKeyNamesList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -581,6 +728,7 @@ public Builder clear() { bitField0_ = 0; encryptionType_ = 0; kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); return this; } @@ -625,6 +773,10 @@ private void buildPartial0( if (((from_bitField0_ & 0x00000002) != 0)) { result.kmsKeyName_ = kmsKeyName_; } + if (((from_bitField0_ & 0x00000004) != 0)) { + kmsKeyNames_.makeImmutable(); + result.kmsKeyNames_ = kmsKeyNames_; + } } @java.lang.Override @@ -683,6 +835,16 @@ public Builder mergeFrom( bitField0_ |= 0x00000002; onChanged(); } + if (!other.kmsKeyNames_.isEmpty()) { + if (kmsKeyNames_.isEmpty()) { + kmsKeyNames_ = other.kmsKeyNames_; + bitField0_ |= 0x00000004; + } else { + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.addAll(other.kmsKeyNames_); + } + onChanged(); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -721,6 +883,13 @@ public Builder mergeFrom( bitField0_ |= 0x00000002; break; } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(s); + break; + } // case 26 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -853,8 +1022,8 @@ public Builder clearEncryptionType() { *
          * Optional. The Cloud KMS key that will be used to protect the backup.
          * This field should be set only when
    -     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] is
    -     * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
          * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
          * 
    * @@ -881,8 +1050,8 @@ public java.lang.String getKmsKeyName() { *
          * Optional. The Cloud KMS key that will be used to protect the backup.
          * This field should be set only when
    -     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] is
    -     * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
          * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
          * 
    * @@ -909,8 +1078,8 @@ public com.google.protobuf.ByteString getKmsKeyNameBytes() { *
          * Optional. The Cloud KMS key that will be used to protect the backup.
          * This field should be set only when
    -     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] is
    -     * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
          * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
          * 
    * @@ -936,8 +1105,8 @@ public Builder setKmsKeyName(java.lang.String value) { *
          * Optional. The Cloud KMS key that will be used to protect the backup.
          * This field should be set only when
    -     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] is
    -     * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
          * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
          * 
    * @@ -959,8 +1128,8 @@ public Builder clearKmsKeyName() { *
          * Optional. The Cloud KMS key that will be used to protect the backup.
          * This field should be set only when
    -     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] is
    -     * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
          * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
          * 
    * @@ -982,6 +1151,324 @@ public Builder setKmsKeyNameBytes(com.google.protobuf.ByteString value) { return this; } + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureKmsKeyNamesIsMutable() { + if (!kmsKeyNames_.isModifiable()) { + kmsKeyNames_ = new com.google.protobuf.LazyStringArrayList(kmsKeyNames_); + } + bitField0_ |= 0x00000004; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + kmsKeyNames_.makeImmutable(); + return kmsKeyNames_; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index to set the value at. + * @param value The kmsKeyNames to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyNames(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNames(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param values The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addAllKmsKeyNames(java.lang.Iterable values) { + ensureKmsKeyNamesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, kmsKeyNames_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearKmsKeyNames() { + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * protect the backup. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the backup's instance configuration. Some examples:
    +     * * For single region instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For an instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes of the kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNamesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupEncryptionConfigOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupEncryptionConfigOrBuilder.java index 7c639eb2a94..526b314d184 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupEncryptionConfigOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupEncryptionConfigOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface CreateBackupEncryptionConfigOrBuilder @@ -60,8 +60,8 @@ public interface CreateBackupEncryptionConfigOrBuilder *
        * Optional. The Cloud KMS key that will be used to protect the backup.
        * This field should be set only when
    -   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] is
    -   * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
        * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
        * 
    * @@ -78,8 +78,8 @@ public interface CreateBackupEncryptionConfigOrBuilder *
        * Optional. The Cloud KMS key that will be used to protect the backup.
        * This field should be set only when
    -   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] is
    -   * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
        * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
        * 
    * @@ -90,4 +90,119 @@ public interface CreateBackupEncryptionConfigOrBuilder * @return The bytes for kmsKeyName. */ com.google.protobuf.ByteString getKmsKeyNameBytes(); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + java.util.List getKmsKeyNamesList(); + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + int getKmsKeyNamesCount(); + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + java.lang.String getKmsKeyNames(int index); + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * protect the backup. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the backup's instance configuration. Some examples:
    +   * * For single region instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For an instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + com.google.protobuf.ByteString getKmsKeyNamesBytes(int index); } diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupMetadata.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupMetadata.java index a3bd040f9b3..a8272a520ad 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupMetadata.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupMetadata.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -175,7 +175,8 @@ public com.google.protobuf.ByteString getDatabaseBytes() { * *
        * The progress of the
    -   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation.
        * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -191,7 +192,8 @@ public boolean hasProgress() { * *
        * The progress of the
    -   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation.
        * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -209,7 +211,8 @@ public com.google.spanner.admin.database.v1.OperationProgress getProgress() { * *
        * The progress of the
    -   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation.
        * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -967,7 +970,8 @@ public Builder setDatabaseBytes(com.google.protobuf.ByteString value) { * *
          * The progress of the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -982,7 +986,8 @@ public boolean hasProgress() { * *
          * The progress of the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -1003,7 +1008,8 @@ public com.google.spanner.admin.database.v1.OperationProgress getProgress() { * *
          * The progress of the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -1026,7 +1032,8 @@ public Builder setProgress(com.google.spanner.admin.database.v1.OperationProgres * *
          * The progress of the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -1047,7 +1054,8 @@ public Builder setProgress( * *
          * The progress of the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -1076,7 +1084,8 @@ public Builder mergeProgress(com.google.spanner.admin.database.v1.OperationProgr * *
          * The progress of the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -1096,7 +1105,8 @@ public Builder clearProgress() { * *
          * The progress of the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -1111,7 +1121,8 @@ public com.google.spanner.admin.database.v1.OperationProgress.Builder getProgres * *
          * The progress of the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -1130,7 +1141,8 @@ public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgre * *
          * The progress of the
    -     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    +     * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +     * operation.
          * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupMetadataOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupMetadataOrBuilder.java index 89f2d482ede..43b55e5b139 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupMetadataOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupMetadataOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface CreateBackupMetadataOrBuilder @@ -79,7 +79,8 @@ public interface CreateBackupMetadataOrBuilder * *
        * The progress of the
    -   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation.
        * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -92,7 +93,8 @@ public interface CreateBackupMetadataOrBuilder * *
        * The progress of the
    -   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation.
        * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; @@ -105,7 +107,8 @@ public interface CreateBackupMetadataOrBuilder * *
        * The progress of the
    -   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation.
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
    +   * operation.
        * 
    * * .google.spanner.admin.database.v1.OperationProgress progress = 3; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupRequest.java index ab34f8aa33a..61f93dc4347 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupRequest.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The request for [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
    + * The request for
    + * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.CreateBackupRequest} @@ -247,11 +248,11 @@ public com.google.spanner.admin.database.v1.BackupOrBuilder getBackupOrBuilder() * * *
    -   * Optional. The encryption configuration used to encrypt the backup. If this field is
    -   * not specified, the backup will use the same
    -   * encryption configuration as the database by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] =
    -   * `USE_DATABASE_ENCRYPTION`.
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the database by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * = `USE_DATABASE_ENCRYPTION`.
        * 
    * * @@ -268,11 +269,11 @@ public boolean hasEncryptionConfig() { * * *
    -   * Optional. The encryption configuration used to encrypt the backup. If this field is
    -   * not specified, the backup will use the same
    -   * encryption configuration as the database by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] =
    -   * `USE_DATABASE_ENCRYPTION`.
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the database by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * = `USE_DATABASE_ENCRYPTION`.
        * 
    * * @@ -291,11 +292,11 @@ public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig getEncr * * *
    -   * Optional. The encryption configuration used to encrypt the backup. If this field is
    -   * not specified, the backup will use the same
    -   * encryption configuration as the database by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] =
    -   * `USE_DATABASE_ENCRYPTION`.
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the database by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * = `USE_DATABASE_ENCRYPTION`.
        * 
    * * @@ -511,7 +512,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The request for [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
    +   * The request for
    +   * [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.CreateBackupRequest} @@ -1225,11 +1227,11 @@ public com.google.spanner.admin.database.v1.BackupOrBuilder getBackupOrBuilder() * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the database by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] =
    -     * `USE_DATABASE_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
          * 
    * * @@ -1245,11 +1247,11 @@ public boolean hasEncryptionConfig() { * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the database by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] =
    -     * `USE_DATABASE_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
          * 
    * * @@ -1271,11 +1273,11 @@ public com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig getEncr * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the database by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] =
    -     * `USE_DATABASE_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
          * 
    * * @@ -1300,11 +1302,11 @@ public Builder setEncryptionConfig( * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the database by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] =
    -     * `USE_DATABASE_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
          * 
    * * @@ -1326,11 +1328,11 @@ public Builder setEncryptionConfig( * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the database by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] =
    -     * `USE_DATABASE_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
          * 
    * * @@ -1362,11 +1364,11 @@ public Builder mergeEncryptionConfig( * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the database by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] =
    -     * `USE_DATABASE_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
          * 
    * * @@ -1387,11 +1389,11 @@ public Builder clearEncryptionConfig() { * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the database by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] =
    -     * `USE_DATABASE_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
          * 
    * * @@ -1408,11 +1410,11 @@ public Builder clearEncryptionConfig() { * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the database by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] =
    -     * `USE_DATABASE_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
          * 
    * * @@ -1433,11 +1435,11 @@ public Builder clearEncryptionConfig() { * * *
    -     * Optional. The encryption configuration used to encrypt the backup. If this field is
    -     * not specified, the backup will use the same
    -     * encryption configuration as the database by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] =
    -     * `USE_DATABASE_ENCRYPTION`.
    +     * Optional. The encryption configuration used to encrypt the backup. If this
    +     * field is not specified, the backup will use the same encryption
    +     * configuration as the database by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +     * = `USE_DATABASE_ENCRYPTION`.
          * 
    * * diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupRequestOrBuilder.java index 63535e00207..bcd63fc198b 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface CreateBackupRequestOrBuilder @@ -137,11 +137,11 @@ public interface CreateBackupRequestOrBuilder * * *
    -   * Optional. The encryption configuration used to encrypt the backup. If this field is
    -   * not specified, the backup will use the same
    -   * encryption configuration as the database by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] =
    -   * `USE_DATABASE_ENCRYPTION`.
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the database by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * = `USE_DATABASE_ENCRYPTION`.
        * 
    * * @@ -155,11 +155,11 @@ public interface CreateBackupRequestOrBuilder * * *
    -   * Optional. The encryption configuration used to encrypt the backup. If this field is
    -   * not specified, the backup will use the same
    -   * encryption configuration as the database by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] =
    -   * `USE_DATABASE_ENCRYPTION`.
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the database by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * = `USE_DATABASE_ENCRYPTION`.
        * 
    * * @@ -173,11 +173,11 @@ public interface CreateBackupRequestOrBuilder * * *
    -   * Optional. The encryption configuration used to encrypt the backup. If this field is
    -   * not specified, the backup will use the same
    -   * encryption configuration as the database by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] =
    -   * `USE_DATABASE_ENCRYPTION`.
    +   * Optional. The encryption configuration used to encrypt the backup. If this
    +   * field is not specified, the backup will use the same encryption
    +   * configuration as the database by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
    +   * = `USE_DATABASE_ENCRYPTION`.
        * 
    * * diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupScheduleRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupScheduleRequest.java new file mode 100644 index 00000000000..023efe0cc73 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupScheduleRequest.java @@ -0,0 +1,1152 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CreateBackupScheduleRequest} + */ +public final class CreateBackupScheduleRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.CreateBackupScheduleRequest) + CreateBackupScheduleRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use CreateBackupScheduleRequest.newBuilder() to construct. + private CreateBackupScheduleRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CreateBackupScheduleRequest() { + parent_ = ""; + backupScheduleId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new CreateBackupScheduleRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.Builder.class); + } + + private int bitField0_; + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + /** + * + * + *
    +   * Required. The name of the database that this backup schedule applies to.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + /** + * + * + *
    +   * Required. The name of the database that this backup schedule applies to.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_SCHEDULE_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object backupScheduleId_ = ""; + /** + * + * + *
    +   * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +   * appended to `parent` forms the full backup schedule name of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The backupScheduleId. + */ + @java.lang.Override + public java.lang.String getBackupScheduleId() { + java.lang.Object ref = backupScheduleId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupScheduleId_ = s; + return s; + } + } + /** + * + * + *
    +   * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +   * appended to `parent` forms the full backup schedule name of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for backupScheduleId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getBackupScheduleIdBytes() { + java.lang.Object ref = backupScheduleId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupScheduleId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int BACKUP_SCHEDULE_FIELD_NUMBER = 3; + private com.google.spanner.admin.database.v1.BackupSchedule backupSchedule_; + /** + * + * + *
    +   * Required. The backup schedule to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backupSchedule field is set. + */ + @java.lang.Override + public boolean hasBackupSchedule() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
    +   * Required. The backup schedule to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backupSchedule. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedule() { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } + /** + * + * + *
    +   * Required. The backup schedule to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleOrBuilder getBackupScheduleOrBuilder() { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(backupScheduleId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, backupScheduleId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getBackupSchedule()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(backupScheduleId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, backupScheduleId_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getBackupSchedule()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean 
equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.CreateBackupScheduleRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest other = + (com.google.spanner.admin.database.v1.CreateBackupScheduleRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getBackupScheduleId().equals(other.getBackupScheduleId())) return false; + if (hasBackupSchedule() != other.hasBackupSchedule()) return false; + if (hasBackupSchedule()) { + if (!getBackupSchedule().equals(other.getBackupSchedule())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + BACKUP_SCHEDULE_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupScheduleId().hashCode(); + if (hasBackupSchedule()) { + hash = (37 * hash) + BACKUP_SCHEDULE_FIELD_NUMBER; + hash = (53 * hash) + getBackupSchedule().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest 
parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * The request for
    +   * [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CreateBackupScheduleRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.CreateBackupScheduleRequest) + com.google.spanner.admin.database.v1.CreateBackupScheduleRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getBackupScheduleFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + backupScheduleId_ = ""; + backupSchedule_ = null; + if (backupScheduleBuilder_ != null) { + backupScheduleBuilder_.dispose(); + backupScheduleBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CreateBackupScheduleRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupScheduleRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupScheduleRequest build() { + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupScheduleRequest buildPartial() { + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest result = + new com.google.spanner.admin.database.v1.CreateBackupScheduleRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.backupScheduleId_ = backupScheduleId_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.backupSchedule_ = + backupScheduleBuilder_ == null ? 
backupSchedule_ : backupScheduleBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.CreateBackupScheduleRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.CreateBackupScheduleRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.CreateBackupScheduleRequest other) { + if (other + == com.google.spanner.admin.database.v1.CreateBackupScheduleRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getBackupScheduleId().isEmpty()) { + backupScheduleId_ = other.backupScheduleId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasBackupSchedule()) { + mergeBackupSchedule(other.getBackupSchedule()); + } + 
this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + backupScheduleId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage(getBackupScheduleFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + /** + * + * + *
    +     * Required. The name of the database that this backup schedule applies to.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * Required. The name of the database that this backup schedule applies to.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * Required. The name of the database that this backup schedule applies to.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The name of the database that this backup schedule applies to.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The name of the database that this backup schedule applies to.
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object backupScheduleId_ = ""; + /** + * + * + *
    +     * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +     * appended to `parent` forms the full backup schedule name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The backupScheduleId. + */ + public java.lang.String getBackupScheduleId() { + java.lang.Object ref = backupScheduleId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + backupScheduleId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +     * appended to `parent` forms the full backup schedule name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for backupScheduleId. + */ + public com.google.protobuf.ByteString getBackupScheduleIdBytes() { + java.lang.Object ref = backupScheduleId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + backupScheduleId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +     * appended to `parent` forms the full backup schedule name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The backupScheduleId to set. + * @return This builder for chaining. + */ + public Builder setBackupScheduleId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + backupScheduleId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +     * appended to `parent` forms the full backup schedule name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearBackupScheduleId() { + backupScheduleId_ = getDefaultInstance().getBackupScheduleId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +     * appended to `parent` forms the full backup schedule name of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for backupScheduleId to set. + * @return This builder for chaining. + */ + public Builder setBackupScheduleIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + backupScheduleId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.spanner.admin.database.v1.BackupSchedule backupSchedule_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder> + backupScheduleBuilder_; + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backupSchedule field is set. + */ + public boolean hasBackupSchedule() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backupSchedule. + */ + public com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedule() { + if (backupScheduleBuilder_ == null) { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } else { + return backupScheduleBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setBackupSchedule(com.google.spanner.admin.database.v1.BackupSchedule value) { + if (backupScheduleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + backupSchedule_ = value; + } else { + backupScheduleBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setBackupSchedule( + com.google.spanner.admin.database.v1.BackupSchedule.Builder builderForValue) { + if (backupScheduleBuilder_ == null) { + backupSchedule_ = builderForValue.build(); + } else { + backupScheduleBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeBackupSchedule(com.google.spanner.admin.database.v1.BackupSchedule value) { + if (backupScheduleBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && backupSchedule_ != null + && backupSchedule_ + != com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance()) { + getBackupScheduleBuilder().mergeFrom(value); + } else { + backupSchedule_ = value; + } + } else { + backupScheduleBuilder_.mergeFrom(value); + } + if (backupSchedule_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearBackupSchedule() { + bitField0_ = (bitField0_ & ~0x00000004); + backupSchedule_ = null; + if (backupScheduleBuilder_ != null) { + backupScheduleBuilder_.dispose(); + backupScheduleBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.BackupSchedule.Builder getBackupScheduleBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getBackupScheduleFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.BackupScheduleOrBuilder + getBackupScheduleOrBuilder() { + if (backupScheduleBuilder_ != null) { + return backupScheduleBuilder_.getMessageOrBuilder(); + } else { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } + } + /** + * + * + *
    +     * Required. The backup schedule to create.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder> + getBackupScheduleFieldBuilder() { + if (backupScheduleBuilder_ == null) { + backupScheduleBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder>( + getBackupSchedule(), getParentForChildren(), isClean()); + backupSchedule_ = null; + } + return backupScheduleBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.CreateBackupScheduleRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CreateBackupScheduleRequest) + private static final com.google.spanner.admin.database.v1.CreateBackupScheduleRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.CreateBackupScheduleRequest(); + } + + public static com.google.spanner.admin.database.v1.CreateBackupScheduleRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateBackupScheduleRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CreateBackupScheduleRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupScheduleRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupScheduleRequestOrBuilder.java new file mode 100644 index 00000000000..76e3acbdcaa --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateBackupScheduleRequestOrBuilder.java @@ -0,0 +1,125 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +public interface CreateBackupScheduleRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.CreateBackupScheduleRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the database that this backup schedule applies to.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + /** + * + * + *
    +   * Required. The name of the database that this backup schedule applies to.
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +   * appended to `parent` forms the full backup schedule name of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The backupScheduleId. + */ + java.lang.String getBackupScheduleId(); + /** + * + * + *
    +   * Required. The Id to use for the backup schedule. The `backup_schedule_id`
    +   * appended to `parent` forms the full backup schedule name of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * string backup_schedule_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for backupScheduleId. + */ + com.google.protobuf.ByteString getBackupScheduleIdBytes(); + + /** + * + * + *
    +   * Required. The backup schedule to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backupSchedule field is set. + */ + boolean hasBackupSchedule(); + /** + * + * + *
    +   * Required. The backup schedule to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backupSchedule. + */ + com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedule(); + /** + * + * + *
    +   * Required. The backup schedule to create.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 3 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder getBackupScheduleOrBuilder(); +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseMetadata.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseMetadata.java index bfb92410086..cde7548fd07 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseMetadata.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseMetadata.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseMetadataOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseMetadataOrBuilder.java index b67d3f8bfe1..f2fea84cb35 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseMetadataOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseMetadataOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface CreateDatabaseMetadataOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseRequest.java index 57b155ae65a..0ff1ec29481 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseRequest.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The request for [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
    + * The request for
    + * [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.CreateDatabaseRequest} @@ -266,8 +267,8 @@ public com.google.protobuf.ByteString getExtraStatementsBytes(int index) { * * *
    -   * Optional. The encryption configuration for the database. If this field is not
    -   * specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +   * Optional. The encryption configuration for the database. If this field is
    +   * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
        * Google default encryption.
        * 
    * @@ -285,8 +286,8 @@ public boolean hasEncryptionConfig() { * * *
    -   * Optional. The encryption configuration for the database. If this field is not
    -   * specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +   * Optional. The encryption configuration for the database. If this field is
    +   * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
        * Google default encryption.
        * 
    * @@ -306,8 +307,8 @@ public com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig * * *
    -   * Optional. The encryption configuration for the database. If this field is not
    -   * specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +   * Optional. The encryption configuration for the database. If this field is
    +   * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
        * Google default encryption.
        * 
    * @@ -621,7 +622,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The request for [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
    +   * The request for
    +   * [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.CreateDatabaseRequest} @@ -1352,8 +1354,8 @@ public Builder addExtraStatementsBytes(com.google.protobuf.ByteString value) { * * *
    -     * Optional. The encryption configuration for the database. If this field is not
    -     * specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
          * Google default encryption.
          * 
    * @@ -1370,8 +1372,8 @@ public boolean hasEncryptionConfig() { * * *
    -     * Optional. The encryption configuration for the database. If this field is not
    -     * specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
          * Google default encryption.
          * 
    * @@ -1394,8 +1396,8 @@ public com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig * * *
    -     * Optional. The encryption configuration for the database. If this field is not
    -     * specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
          * Google default encryption.
          * 
    * @@ -1421,8 +1423,8 @@ public Builder setEncryptionConfig( * * *
    -     * Optional. The encryption configuration for the database. If this field is not
    -     * specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
          * Google default encryption.
          * 
    * @@ -1445,8 +1447,8 @@ public Builder setEncryptionConfig( * * *
    -     * Optional. The encryption configuration for the database. If this field is not
    -     * specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
          * Google default encryption.
          * 
    * @@ -1478,8 +1480,8 @@ public Builder mergeEncryptionConfig( * * *
    -     * Optional. The encryption configuration for the database. If this field is not
    -     * specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
          * Google default encryption.
          * 
    * @@ -1501,8 +1503,8 @@ public Builder clearEncryptionConfig() { * * *
    -     * Optional. The encryption configuration for the database. If this field is not
    -     * specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
          * Google default encryption.
          * 
    * @@ -1520,8 +1522,8 @@ public Builder clearEncryptionConfig() { * * *
    -     * Optional. The encryption configuration for the database. If this field is not
    -     * specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
          * Google default encryption.
          * 
    * @@ -1543,8 +1545,8 @@ public Builder clearEncryptionConfig() { * * *
    -     * Optional. The encryption configuration for the database. If this field is not
    -     * specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +     * Optional. The encryption configuration for the database. If this field is
    +     * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
          * Google default encryption.
          * 
    * diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseRequestOrBuilder.java index db215630c65..8abc8920f33 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CreateDatabaseRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface CreateDatabaseRequestOrBuilder @@ -155,8 +155,8 @@ public interface CreateDatabaseRequestOrBuilder * * *
    -   * Optional. The encryption configuration for the database. If this field is not
    -   * specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +   * Optional. The encryption configuration for the database. If this field is
    +   * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
        * Google default encryption.
        * 
    * @@ -171,8 +171,8 @@ public interface CreateDatabaseRequestOrBuilder * * *
    -   * Optional. The encryption configuration for the database. If this field is not
    -   * specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +   * Optional. The encryption configuration for the database. If this field is
    +   * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
        * Google default encryption.
        * 
    * @@ -187,8 +187,8 @@ public interface CreateDatabaseRequestOrBuilder * * *
    -   * Optional. The encryption configuration for the database. If this field is not
    -   * specified, Cloud Spanner will encrypt/decrypt all data at rest using
    +   * Optional. The encryption configuration for the database. If this field is
    +   * not specified, Cloud Spanner will encrypt/decrypt all data at rest using
        * Google default encryption.
        * 
    * diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CrontabSpec.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CrontabSpec.java new file mode 100644 index 00000000000..d1d27938879 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CrontabSpec.java @@ -0,0 +1,1269 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * CrontabSpec can be used to specify the version time and frequency at
    + * which the backup should be created.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CrontabSpec} + */ +public final class CrontabSpec extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.CrontabSpec) + CrontabSpecOrBuilder { + private static final long serialVersionUID = 0L; + // Use CrontabSpec.newBuilder() to construct. + private CrontabSpec(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CrontabSpec() { + text_ = ""; + timeZone_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new CrontabSpec(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CrontabSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CrontabSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CrontabSpec.class, + com.google.spanner.admin.database.v1.CrontabSpec.Builder.class); + } + + private int bitField0_; + public static final int TEXT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object text_ = ""; + /** + * + * + *
    +   * Required. Textual representation of the crontab. User can customize the
    +   * backup frequency and the backup version time using the cron
    +   * expression. The version time must be in UTC timzeone.
    +   *
    +   * The backup will contain an externally consistent copy of the
    +   * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +   * 1 week and 1 month. Examples of valid cron specifications:
    +   *   * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +   *   * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +   *   * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +   *   * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +   *   * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +   * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The text. + */ + @java.lang.Override + public java.lang.String getText() { + java.lang.Object ref = text_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + text_ = s; + return s; + } + } + /** + * + * + *
    +   * Required. Textual representation of the crontab. User can customize the
    +   * backup frequency and the backup version time using the cron
    +   * expression. The version time must be in UTC timzeone.
    +   *
    +   * The backup will contain an externally consistent copy of the
    +   * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +   * 1 week and 1 month. Examples of valid cron specifications:
    +   *   * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +   *   * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +   *   * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +   *   * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +   *   * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +   * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for text. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTextBytes() { + java.lang.Object ref = text_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + text_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TIME_ZONE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object timeZone_ = ""; + /** + * + * + *
    +   * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +   * only UTC is supported.
    +   * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The timeZone. + */ + @java.lang.Override + public java.lang.String getTimeZone() { + java.lang.Object ref = timeZone_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + timeZone_ = s; + return s; + } + } + /** + * + * + *
    +   * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +   * only UTC is supported.
    +   * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for timeZone. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTimeZoneBytes() { + java.lang.Object ref = timeZone_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + timeZone_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CREATION_WINDOW_FIELD_NUMBER = 3; + private com.google.protobuf.Duration creationWindow_; + /** + * + * + *
    +   * Output only. Schedule backups will contain an externally consistent copy
    +   * of the database at the version time specified in
    +   * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +   * of the scheduled backups at that version time. Spanner will initiate
    +   * the creation of scheduled backups within the time window bounded by the
    +   * version_time specified in `schedule_spec.cron_spec` and version_time +
    +   * `creation_window`.
    +   * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the creationWindow field is set. + */ + @java.lang.Override + public boolean hasCreationWindow() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
    +   * Output only. Schedule backups will contain an externally consistent copy
    +   * of the database at the version time specified in
    +   * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +   * of the scheduled backups at that version time. Spanner will initiate
    +   * the creation of scheduled backups within the time window bounded by the
    +   * version_time specified in `schedule_spec.cron_spec` and version_time +
    +   * `creation_window`.
    +   * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The creationWindow. + */ + @java.lang.Override + public com.google.protobuf.Duration getCreationWindow() { + return creationWindow_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : creationWindow_; + } + /** + * + * + *
    +   * Output only. Schedule backups will contain an externally consistent copy
    +   * of the database at the version time specified in
    +   * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +   * of the scheduled backups at that version time. Spanner will initiate
    +   * the creation of scheduled backups within the time window bounded by the
    +   * version_time specified in `schedule_spec.cron_spec` and version_time +
    +   * `creation_window`.
    +   * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getCreationWindowOrBuilder() { + return creationWindow_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : creationWindow_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(text_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, text_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(timeZone_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, timeZone_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(3, getCreationWindow()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(text_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, text_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(timeZone_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, timeZone_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCreationWindow()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.spanner.admin.database.v1.CrontabSpec)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.CrontabSpec other = + (com.google.spanner.admin.database.v1.CrontabSpec) obj; + + if (!getText().equals(other.getText())) return false; + if (!getTimeZone().equals(other.getTimeZone())) return false; + if (hasCreationWindow() != other.hasCreationWindow()) return false; + if (hasCreationWindow()) { + if (!getCreationWindow().equals(other.getCreationWindow())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TEXT_FIELD_NUMBER; + hash = (53 * hash) + getText().hashCode(); + hash = (37 * hash) + TIME_ZONE_FIELD_NUMBER; + hash = (53 * hash) + getTimeZone().hashCode(); + if (hasCreationWindow()) { + hash = (37 * hash) + CREATION_WINDOW_FIELD_NUMBER; + hash = (53 * hash) + getCreationWindow().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.database.v1.CrontabSpec prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * CrontabSpec can be used to specify the version time and frequency at
    +   * which the backup should be created.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.CrontabSpec} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.CrontabSpec) + com.google.spanner.admin.database.v1.CrontabSpecOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CrontabSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CrontabSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.CrontabSpec.class, + com.google.spanner.admin.database.v1.CrontabSpec.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.CrontabSpec.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getCreationWindowFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + text_ = ""; + timeZone_ = ""; + creationWindow_ = null; + if (creationWindowBuilder_ != null) { + creationWindowBuilder_.dispose(); + creationWindowBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_CrontabSpec_descriptor; + } + 
+ @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpec getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpec build() { + com.google.spanner.admin.database.v1.CrontabSpec result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpec buildPartial() { + com.google.spanner.admin.database.v1.CrontabSpec result = + new com.google.spanner.admin.database.v1.CrontabSpec(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.database.v1.CrontabSpec result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.text_ = text_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.timeZone_ = timeZone_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000004) != 0)) { + result.creationWindow_ = + creationWindowBuilder_ == null ? 
creationWindow_ : creationWindowBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.CrontabSpec) { + return mergeFrom((com.google.spanner.admin.database.v1.CrontabSpec) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.CrontabSpec other) { + if (other == com.google.spanner.admin.database.v1.CrontabSpec.getDefaultInstance()) + return this; + if (!other.getText().isEmpty()) { + text_ = other.text_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getTimeZone().isEmpty()) { + timeZone_ = other.timeZone_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasCreationWindow()) { + mergeCreationWindow(other.getCreationWindow()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + 
public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + text_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + timeZone_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage(getCreationWindowFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object text_ = ""; + /** + * + * + *
    +     * Required. Textual representation of the crontab. User can customize the
    +     * backup frequency and the backup version time using the cron
    +     * expression. The version time must be in UTC timezone.
    +     *
    +     * The backup will contain an externally consistent copy of the
    +     * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +     * 1 week and 1 month. Examples of valid cron specifications:
    +     *   * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +     *   * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +     *   * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +     *   * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +     *   * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +     * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The text. + */ + public java.lang.String getText() { + java.lang.Object ref = text_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + text_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * Required. Textual representation of the crontab. User can customize the
    +     * backup frequency and the backup version time using the cron
    +     * expression. The version time must be in UTC timezone.
    +     *
    +     * The backup will contain an externally consistent copy of the
    +     * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +     * 1 week and 1 month. Examples of valid cron specifications:
    +     *   * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +     *   * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +     *   * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +     *   * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +     *   * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +     * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for text. + */ + public com.google.protobuf.ByteString getTextBytes() { + java.lang.Object ref = text_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + text_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * Required. Textual representation of the crontab. User can customize the
    +     * backup frequency and the backup version time using the cron
    +     * expression. The version time must be in UTC timezone.
    +     *
    +     * The backup will contain an externally consistent copy of the
    +     * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +     * 1 week and 1 month. Examples of valid cron specifications:
    +     *   * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +     *   * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +     *   * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +     *   * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +     *   * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +     * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The text to set. + * @return This builder for chaining. + */ + public Builder setText(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + text_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. Textual representation of the crontab. User can customize the
    +     * backup frequency and the backup version time using the cron
    +     * expression. The version time must be in UTC timezone.
    +     *
    +     * The backup will contain an externally consistent copy of the
    +     * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +     * 1 week and 1 month. Examples of valid cron specifications:
    +     *   * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +     *   * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +     *   * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +     *   * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +     *   * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +     * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearText() { + text_ = getDefaultInstance().getText(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. Textual representation of the crontab. User can customize the
    +     * backup frequency and the backup version time using the cron
    +     * expression. The version time must be in UTC timezone.
    +     *
    +     * The backup will contain an externally consistent copy of the
    +     * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +     * 1 week and 1 month. Examples of valid cron specifications:
    +     *   * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +     *   * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +     *   * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +     *   * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +     *   * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +     * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for text to set. + * @return This builder for chaining. + */ + public Builder setTextBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + text_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object timeZone_ = ""; + /** + * + * + *
    +     * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +     * only UTC is supported.
    +     * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The timeZone. + */ + public java.lang.String getTimeZone() { + java.lang.Object ref = timeZone_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + timeZone_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +     * only UTC is supported.
    +     * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for timeZone. + */ + public com.google.protobuf.ByteString getTimeZoneBytes() { + java.lang.Object ref = timeZone_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + timeZone_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +     * only UTC is supported.
    +     * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The timeZone to set. + * @return This builder for chaining. + */ + public Builder setTimeZone(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + timeZone_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +     * only UTC is supported.
    +     * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearTimeZone() { + timeZone_ = getDefaultInstance().getTimeZone(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +     * only UTC is supported.
    +     * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for timeZone to set. + * @return This builder for chaining. + */ + public Builder setTimeZoneBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + timeZone_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private com.google.protobuf.Duration creationWindow_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + creationWindowBuilder_; + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the creationWindow field is set. + */ + public boolean hasCreationWindow() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The creationWindow. + */ + public com.google.protobuf.Duration getCreationWindow() { + if (creationWindowBuilder_ == null) { + return creationWindow_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : creationWindow_; + } else { + return creationWindowBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreationWindow(com.google.protobuf.Duration value) { + if (creationWindowBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + creationWindow_ = value; + } else { + creationWindowBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreationWindow(com.google.protobuf.Duration.Builder builderForValue) { + if (creationWindowBuilder_ == null) { + creationWindow_ = builderForValue.build(); + } else { + creationWindowBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreationWindow(com.google.protobuf.Duration value) { + if (creationWindowBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && creationWindow_ != null + && creationWindow_ != com.google.protobuf.Duration.getDefaultInstance()) { + getCreationWindowBuilder().mergeFrom(value); + } else { + creationWindow_ = value; + } + } else { + creationWindowBuilder_.mergeFrom(value); + } + if (creationWindow_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreationWindow() { + bitField0_ = (bitField0_ & ~0x00000004); + creationWindow_ = null; + if (creationWindowBuilder_ != null) { + creationWindowBuilder_.dispose(); + creationWindowBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Duration.Builder getCreationWindowBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getCreationWindowFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.DurationOrBuilder getCreationWindowOrBuilder() { + if (creationWindowBuilder_ != null) { + return creationWindowBuilder_.getMessageOrBuilder(); + } else { + return creationWindow_ == null + ? com.google.protobuf.Duration.getDefaultInstance() + : creationWindow_; + } + } + /** + * + * + *
    +     * Output only. Schedule backups will contain an externally consistent copy
    +     * of the database at the version time specified in
    +     * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +     * of the scheduled backups at that version time. Spanner will initiate
    +     * the creation of scheduled backups within the time window bounded by the
    +     * version_time specified in `schedule_spec.cron_spec` and version_time +
    +     * `creation_window`.
    +     * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + getCreationWindowFieldBuilder() { + if (creationWindowBuilder_ == null) { + creationWindowBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + getCreationWindow(), getParentForChildren(), isClean()); + creationWindow_ = null; + } + return creationWindowBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.CrontabSpec) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CrontabSpec) + private static final com.google.spanner.admin.database.v1.CrontabSpec DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.CrontabSpec(); + } + + public static com.google.spanner.admin.database.v1.CrontabSpec getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CrontabSpec parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.CrontabSpec getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CrontabSpecOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CrontabSpecOrBuilder.java new file mode 100644 index 00000000000..2365789b94f --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/CrontabSpecOrBuilder.java @@ -0,0 +1,159 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +public interface CrontabSpecOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.CrontabSpec) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. Textual representation of the crontab. User can customize the
    +   * backup frequency and the backup version time using the cron
    +   * expression. The version time must be in UTC timezone.
    +   *
    +   * The backup will contain an externally consistent copy of the
    +   * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +   * 1 week and 1 month. Examples of valid cron specifications:
    +   *   * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +   *   * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +   *   * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +   *   * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +   *   * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +   * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The text. + */ + java.lang.String getText(); + /** + * + * + *
    +   * Required. Textual representation of the crontab. User can customize the
    +   * backup frequency and the backup version time using the cron
    +   * expression. The version time must be in UTC timezone.
    +   *
    +   * The backup will contain an externally consistent copy of the
    +   * database at the version time. Allowed frequencies are 12 hour, 1 day,
    +   * 1 week and 1 month. Examples of valid cron specifications:
    +   *   * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
    +   *   * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
    +   *   * `0 2 * * * `    : once a day at 2 past midnight in UTC.
    +   *   * `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
    +   *   * `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
    +   * 
    + * + * string text = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for text. + */ + com.google.protobuf.ByteString getTextBytes(); + + /** + * + * + *
    +   * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +   * only UTC is supported.
    +   * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The timeZone. + */ + java.lang.String getTimeZone(); + /** + * + * + *
    +   * Output only. The time zone of the times in `CrontabSpec.text`. Currently
    +   * only UTC is supported.
    +   * 
    + * + * string time_zone = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for timeZone. + */ + com.google.protobuf.ByteString getTimeZoneBytes(); + + /** + * + * + *
    +   * Output only. Schedule backups will contain an externally consistent copy
    +   * of the database at the version time specified in
    +   * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +   * of the scheduled backups at that version time. Spanner will initiate
    +   * the creation of scheduled backups within the time window bounded by the
    +   * version_time specified in `schedule_spec.cron_spec` and version_time +
    +   * `creation_window`.
    +   * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the creationWindow field is set. + */ + boolean hasCreationWindow(); + /** + * + * + *
    +   * Output only. Schedule backups will contain an externally consistent copy
    +   * of the database at the version time specified in
    +   * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +   * of the scheduled backups at that version time. Spanner will initiate
    +   * the creation of scheduled backups within the time window bounded by the
    +   * version_time specified in `schedule_spec.cron_spec` and version_time +
    +   * `creation_window`.
    +   * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The creationWindow. + */ + com.google.protobuf.Duration getCreationWindow(); + /** + * + * + *
    +   * Output only. Schedule backups will contain an externally consistent copy
    +   * of the database at the version time specified in
    +   * `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
    +   * of the scheduled backups at that version time. Spanner will initiate
    +   * the creation of scheduled backups within the time window bounded by the
    +   * version_time specified in `schedule_spec.cron_spec` and version_time +
    +   * `creation_window`.
    +   * 
    + * + * + * .google.protobuf.Duration creation_window = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.DurationOrBuilder getCreationWindowOrBuilder(); +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/Database.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/Database.java index 0fe161abbbe..402b842c1b7 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/Database.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/Database.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -548,7 +548,8 @@ public com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig *
        * Output only. For databases that are using customer managed encryption, this
        * field contains the encryption information for the database, such as
    -   * encryption state and the Cloud KMS key versions that are in use.
    +   * all Cloud KMS key versions that are in use. The `encryption_status` field
    +   * inside of each `EncryptionInfo` is not populated.
        *
        * For databases that are using Google default or other types of encryption,
        * this field is empty.
    @@ -572,7 +573,8 @@ public com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig
        * 
        * Output only. For databases that are using customer managed encryption, this
        * field contains the encryption information for the database, such as
    -   * encryption state and the Cloud KMS key versions that are in use.
    +   * all Cloud KMS key versions that are in use. The `encryption_status` field
    +   * inside of each `EncryptionInfo` is not populated.
        *
        * For databases that are using Google default or other types of encryption,
        * this field is empty.
    @@ -596,7 +598,8 @@ public com.google.spanner.admin.database.v1.EncryptionConfig getEncryptionConfig
        * 
        * Output only. For databases that are using customer managed encryption, this
        * field contains the encryption information for the database, such as
    -   * encryption state and the Cloud KMS key versions that are in use.
    +   * all Cloud KMS key versions that are in use. The `encryption_status` field
    +   * inside of each `EncryptionInfo` is not populated.
        *
        * For databases that are using Google default or other types of encryption,
        * this field is empty.
    @@ -619,7 +622,8 @@ public int getEncryptionInfoCount() {
        * 
        * Output only. For databases that are using customer managed encryption, this
        * field contains the encryption information for the database, such as
    -   * encryption state and the Cloud KMS key versions that are in use.
    +   * all Cloud KMS key versions that are in use. The `encryption_status` field
    +   * inside of each `EncryptionInfo` is not populated.
        *
        * For databases that are using Google default or other types of encryption,
        * this field is empty.
    @@ -642,7 +646,8 @@ public com.google.spanner.admin.database.v1.EncryptionInfo getEncryptionInfo(int
        * 
        * Output only. For databases that are using customer managed encryption, this
        * field contains the encryption information for the database, such as
    -   * encryption state and the Cloud KMS key versions that are in use.
    +   * all Cloud KMS key versions that are in use. The `encryption_status` field
    +   * inside of each `EncryptionInfo` is not populated.
        *
        * For databases that are using Google default or other types of encryption,
        * this field is empty.
    @@ -672,8 +677,8 @@ public com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptio
        * Output only. The period in which Cloud Spanner retains all versions of data
        * for the database. This is the same as the value of version_retention_period
        * database option set using
    -   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. Defaults to 1 hour,
    -   * if not set.
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    +   * Defaults to 1 hour, if not set.
        * 
    * * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -699,8 +704,8 @@ public java.lang.String getVersionRetentionPeriod() { * Output only. The period in which Cloud Spanner retains all versions of data * for the database. This is the same as the value of version_retention_period * database option set using - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. Defaults to 1 hour, - * if not set. + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. + * Defaults to 1 hour, if not set. *
    * * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -897,7 +902,9 @@ public com.google.spanner.admin.database.v1.DatabaseDialect getDatabaseDialect() * *
        * Whether drop protection is enabled for this database. Defaults to false,
    -   * if not set.
    +   * if not set. For more details, please see how to [prevent accidental
    +   * database
    +   * deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion).
        * 
    * * bool enable_drop_protection = 11; @@ -2538,7 +2545,8 @@ private void ensureEncryptionInfoIsMutable() { *
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2565,7 +2573,8 @@ private void ensureEncryptionInfoIsMutable() {
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2591,7 +2600,8 @@ public int getEncryptionInfoCount() {
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2617,7 +2627,8 @@ public com.google.spanner.admin.database.v1.EncryptionInfo getEncryptionInfo(int
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2650,7 +2661,8 @@ public Builder setEncryptionInfo(
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2680,7 +2692,8 @@ public Builder setEncryptionInfo(
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2712,7 +2725,8 @@ public Builder addEncryptionInfo(com.google.spanner.admin.database.v1.Encryption
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2745,7 +2759,8 @@ public Builder addEncryptionInfo(
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2775,7 +2790,8 @@ public Builder addEncryptionInfo(
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2805,7 +2821,8 @@ public Builder addEncryptionInfo(
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2835,7 +2852,8 @@ public Builder addAllEncryptionInfo(
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2864,7 +2882,8 @@ public Builder clearEncryptionInfo() {
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2893,7 +2912,8 @@ public Builder removeEncryptionInfo(int index) {
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2916,7 +2936,8 @@ public com.google.spanner.admin.database.v1.EncryptionInfo.Builder getEncryption
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2943,7 +2964,8 @@ public com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptio
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2970,7 +2992,8 @@ public com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptio
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -2993,7 +3016,8 @@ public com.google.spanner.admin.database.v1.EncryptionInfo.Builder addEncryption
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -3018,7 +3042,8 @@ public com.google.spanner.admin.database.v1.EncryptionInfo.Builder addEncryption
          * 
          * Output only. For databases that are using customer managed encryption, this
          * field contains the encryption information for the database, such as
    -     * encryption state and the Cloud KMS key versions that are in use.
    +     * all Cloud KMS key versions that are in use. The `encryption_status' field
    +     * inside of each `EncryptionInfo` is not populated.
          *
          * For databases that are using Google default or other types of encryption,
          * this field is empty.
    @@ -3064,8 +3089,8 @@ public com.google.spanner.admin.database.v1.EncryptionInfo.Builder addEncryption
          * Output only. The period in which Cloud Spanner retains all versions of data
          * for the database. This is the same as the value of version_retention_period
          * database option set using
    -     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. Defaults to 1 hour,
    -     * if not set.
    +     * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    +     * Defaults to 1 hour, if not set.
          * 
    * * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -3091,8 +3116,8 @@ public java.lang.String getVersionRetentionPeriod() { * Output only. The period in which Cloud Spanner retains all versions of data * for the database. This is the same as the value of version_retention_period * database option set using - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. Defaults to 1 hour, - * if not set. + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. + * Defaults to 1 hour, if not set. *
    * * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -3118,8 +3143,8 @@ public com.google.protobuf.ByteString getVersionRetentionPeriodBytes() { * Output only. The period in which Cloud Spanner retains all versions of data * for the database. This is the same as the value of version_retention_period * database option set using - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. Defaults to 1 hour, - * if not set. + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. + * Defaults to 1 hour, if not set. *
    * * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -3144,8 +3169,8 @@ public Builder setVersionRetentionPeriod(java.lang.String value) { * Output only. The period in which Cloud Spanner retains all versions of data * for the database. This is the same as the value of version_retention_period * database option set using - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. Defaults to 1 hour, - * if not set. + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. + * Defaults to 1 hour, if not set. *
    * * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -3166,8 +3191,8 @@ public Builder clearVersionRetentionPeriod() { * Output only. The period in which Cloud Spanner retains all versions of data * for the database. This is the same as the value of version_retention_period * database option set using - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. Defaults to 1 hour, - * if not set. + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. + * Defaults to 1 hour, if not set. *
    * * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -3665,7 +3690,9 @@ public Builder clearDatabaseDialect() { * *
          * Whether drop protection is enabled for this database. Defaults to false,
    -     * if not set.
    +     * if not set. For more details, please see how to [prevent accidental
    +     * database
    +     * deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion).
          * 
    * * bool enable_drop_protection = 11; @@ -3681,7 +3708,9 @@ public boolean getEnableDropProtection() { * *
          * Whether drop protection is enabled for this database. Defaults to false,
    -     * if not set.
    +     * if not set. For more details, please see how to [prevent accidental
    +     * database
    +     * deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion).
          * 
    * * bool enable_drop_protection = 11; @@ -3701,7 +3730,9 @@ public Builder setEnableDropProtection(boolean value) { * *
          * Whether drop protection is enabled for this database. Defaults to false,
    -     * if not set.
    +     * if not set. For more details, please see how to [prevent accidental
    +     * database
    +     * deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion).
          * 
    * * bool enable_drop_protection = 11; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseDialect.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseDialect.java index 3ef5ebce606..e4c35ad41f9 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseDialect.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseDialect.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/common.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -44,7 +44,7 @@ public enum DatabaseDialect implements com.google.protobuf.ProtocolMessageEnum { * * *
    -   * Google standard SQL.
    +   * GoogleSQL supported SQL.
        * 
    * * GOOGLE_STANDARD_SQL = 1; @@ -78,7 +78,7 @@ public enum DatabaseDialect implements com.google.protobuf.ProtocolMessageEnum { * * *
    -   * Google standard SQL.
    +   * GoogleSQL supported SQL.
        * 
    * * GOOGLE_STANDARD_SQL = 1; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseOrBuilder.java index 18345d9ad61..a55aba751c2 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface DatabaseOrBuilder @@ -224,7 +224,8 @@ public interface DatabaseOrBuilder *
        * Output only. For databases that are using customer managed encryption, this
        * field contains the encryption information for the database, such as
    -   * encryption state and the Cloud KMS key versions that are in use.
    +   * all Cloud KMS key versions that are in use. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated.
        *
        * For databases that are using Google default or other types of encryption,
        * this field is empty.
    @@ -244,7 +245,8 @@ public interface DatabaseOrBuilder
        * 
        * Output only. For databases that are using customer managed encryption, this
        * field contains the encryption information for the database, such as
    -   * encryption state and the Cloud KMS key versions that are in use.
    +   * all Cloud KMS key versions that are in use. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated.
        *
        * For databases that are using Google default or other types of encryption,
        * this field is empty.
    @@ -264,7 +266,8 @@ public interface DatabaseOrBuilder
        * 
        * Output only. For databases that are using customer managed encryption, this
        * field contains the encryption information for the database, such as
    -   * encryption state and the Cloud KMS key versions that are in use.
    +   * all Cloud KMS key versions that are in use. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated.
        *
        * For databases that are using Google default or other types of encryption,
        * this field is empty.
    @@ -284,7 +287,8 @@ public interface DatabaseOrBuilder
        * 
        * Output only. For databases that are using customer managed encryption, this
        * field contains the encryption information for the database, such as
    -   * encryption state and the Cloud KMS key versions that are in use.
    +   * all Cloud KMS key versions that are in use. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated.
        *
        * For databases that are using Google default or other types of encryption,
        * this field is empty.
    @@ -305,7 +309,8 @@ public interface DatabaseOrBuilder
        * 
        * Output only. For databases that are using customer managed encryption, this
        * field contains the encryption information for the database, such as
    -   * encryption state and the Cloud KMS key versions that are in use.
    +   * all Cloud KMS key versions that are in use. The `encryption_status' field
    +   * inside of each `EncryptionInfo` is not populated.
        *
        * For databases that are using Google default or other types of encryption,
        * this field is empty.
    @@ -328,8 +333,8 @@ com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptionInfoOr
        * Output only. The period in which Cloud Spanner retains all versions of data
        * for the database. This is the same as the value of version_retention_period
        * database option set using
    -   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. Defaults to 1 hour,
    -   * if not set.
    +   * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
    +   * Defaults to 1 hour, if not set.
        * 
    * * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -344,8 +349,8 @@ com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptionInfoOr * Output only. The period in which Cloud Spanner retains all versions of data * for the database. This is the same as the value of version_retention_period * database option set using - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. Defaults to 1 hour, - * if not set. + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. + * Defaults to 1 hour, if not set. *
    * * string version_retention_period = 6 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -476,7 +481,9 @@ com.google.spanner.admin.database.v1.EncryptionInfoOrBuilder getEncryptionInfoOr * *
        * Whether drop protection is enabled for this database. Defaults to false,
    -   * if not set.
    +   * if not set. For more details, please see how to [prevent accidental
    +   * database
    +   * deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion).
        * 
    * * bool enable_drop_protection = 11; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseRole.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseRole.java index ccfc053b9fe..7120de00d7b 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseRole.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseRole.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -72,10 +72,8 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * *
        * Required. The name of the database role. Values are of the form
    -   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/
    -   * {role}`, where `<role>` is as specified in the `CREATE ROLE`
    -   * DDL statement. This name can be passed to Get/Set IAMPolicy methods to
    -   * identify the database role.
    +   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +   * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
        * 
    * * string name = 1 [(.google.api.field_behavior) = REQUIRED]; @@ -99,10 +97,8 @@ public java.lang.String getName() { * *
        * Required. The name of the database role. Values are of the form
    -   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/
    -   * {role}`, where `<role>` is as specified in the `CREATE ROLE`
    -   * DDL statement. This name can be passed to Get/Set IAMPolicy methods to
    -   * identify the database role.
    +   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +   * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
        * 
    * * string name = 1 [(.google.api.field_behavior) = REQUIRED]; @@ -470,10 +466,8 @@ public Builder mergeFrom( * *
          * Required. The name of the database role. Values are of the form
    -     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/
    -     * {role}`, where `<role>` is as specified in the `CREATE ROLE`
    -     * DDL statement. This name can be passed to Get/Set IAMPolicy methods to
    -     * identify the database role.
    +     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +     * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
          * 
    * * string name = 1 [(.google.api.field_behavior) = REQUIRED]; @@ -496,10 +490,8 @@ public java.lang.String getName() { * *
          * Required. The name of the database role. Values are of the form
    -     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/
    -     * {role}`, where `<role>` is as specified in the `CREATE ROLE`
    -     * DDL statement. This name can be passed to Get/Set IAMPolicy methods to
    -     * identify the database role.
    +     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +     * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
          * 
    * * string name = 1 [(.google.api.field_behavior) = REQUIRED]; @@ -522,10 +514,8 @@ public com.google.protobuf.ByteString getNameBytes() { * *
          * Required. The name of the database role. Values are of the form
    -     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/
    -     * {role}`, where `<role>` is as specified in the `CREATE ROLE`
    -     * DDL statement. This name can be passed to Get/Set IAMPolicy methods to
    -     * identify the database role.
    +     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +     * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
          * 
    * * string name = 1 [(.google.api.field_behavior) = REQUIRED]; @@ -547,10 +537,8 @@ public Builder setName(java.lang.String value) { * *
          * Required. The name of the database role. Values are of the form
    -     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/
    -     * {role}`, where `<role>` is as specified in the `CREATE ROLE`
    -     * DDL statement. This name can be passed to Get/Set IAMPolicy methods to
    -     * identify the database role.
    +     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +     * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
          * 
    * * string name = 1 [(.google.api.field_behavior) = REQUIRED]; @@ -568,10 +556,8 @@ public Builder clearName() { * *
          * Required. The name of the database role. Values are of the form
    -     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/
    -     * {role}`, where `<role>` is as specified in the `CREATE ROLE`
    -     * DDL statement. This name can be passed to Get/Set IAMPolicy methods to
    -     * identify the database role.
    +     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +     * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
          * 
    * * string name = 1 [(.google.api.field_behavior) = REQUIRED]; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseRoleOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseRoleOrBuilder.java index aa452bbf47a..1b9e8e65cb7 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseRoleOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseRoleOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface DatabaseRoleOrBuilder @@ -29,10 +29,8 @@ public interface DatabaseRoleOrBuilder * *
        * Required. The name of the database role. Values are of the form
    -   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/
    -   * {role}`, where `<role>` is as specified in the `CREATE ROLE`
    -   * DDL statement. This name can be passed to Get/Set IAMPolicy methods to
    -   * identify the database role.
    +   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +   * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
        * 
    * * string name = 1 [(.google.api.field_behavior) = REQUIRED]; @@ -45,10 +43,8 @@ public interface DatabaseRoleOrBuilder * *
        * Required. The name of the database role. Values are of the form
    -   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/
    -   * {role}`, where `<role>` is as specified in the `CREATE ROLE`
    -   * DDL statement. This name can be passed to Get/Set IAMPolicy methods to
    -   * identify the database role.
    +   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
    +   * where `<role>` is as specified in the `CREATE ROLE` DDL statement.
        * 
    * * string name = 1 [(.google.api.field_behavior) = REQUIRED]; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DdlStatementActionInfo.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DdlStatementActionInfo.java index 3e376aaf268..cd72e2717ce 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DdlStatementActionInfo.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DdlStatementActionInfo.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DdlStatementActionInfoOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DdlStatementActionInfoOrBuilder.java index 4d6b46ba11c..2e17d981b63 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DdlStatementActionInfoOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DdlStatementActionInfoOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface DdlStatementActionInfoOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupRequest.java index 6b3f02c8c7a..14c49f95d71 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupRequest.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The request for [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
    + * The request for
    + * [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.DeleteBackupRequest} @@ -286,7 +287,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The request for [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
    +   * The request for
    +   * [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.DeleteBackupRequest} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupRequestOrBuilder.java index 19f29594533..a80fc15aee4 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface DeleteBackupRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupScheduleRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupScheduleRequest.java new file mode 100644 index 00000000000..cff5ce2a814 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupScheduleRequest.java @@ -0,0 +1,663 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.DeleteBackupScheduleRequest} + */ +public final class DeleteBackupScheduleRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.DeleteBackupScheduleRequest) + DeleteBackupScheduleRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use DeleteBackupScheduleRequest.newBuilder() to construct. + private DeleteBackupScheduleRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private DeleteBackupScheduleRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new DeleteBackupScheduleRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * + * + *
    +   * Required. The name of the schedule to delete.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
    +   * Required. The name of the schedule to delete.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest other = + (com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return 
false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + 
com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * The request for
    +   * [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.DeleteBackupScheduleRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.DeleteBackupScheduleRequest) + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_DeleteBackupScheduleRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.getDefaultInstance(); + } + + @java.lang.Override + 
public com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest build() { + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest buildPartial() { + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest result = + new com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest) { + 
return mergeFrom((com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest other) { + if (other + == com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * + * + *
    +     * Required. The name of the schedule to delete.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * Required. The name of the schedule to delete.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * Required. The name of the schedule to delete.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The name of the schedule to delete.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The name of the schedule to delete.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.DeleteBackupScheduleRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.DeleteBackupScheduleRequest) + private static final com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest(); + } + + public static com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DeleteBackupScheduleRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch 
(com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupScheduleRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupScheduleRequestOrBuilder.java new file mode 100644 index 00000000000..f67464c23fb --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DeleteBackupScheduleRequestOrBuilder.java @@ -0,0 +1,59 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +public interface DeleteBackupScheduleRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.DeleteBackupScheduleRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the schedule to delete.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
    +   * Required. The name of the schedule to delete.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DropDatabaseRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DropDatabaseRequest.java index 9d7823c8ef0..1e8979c919e 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DropDatabaseRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DropDatabaseRequest.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The request for [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
    + * The request for
    + * [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.DropDatabaseRequest} @@ -282,7 +283,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The request for [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
    +   * The request for
    +   * [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.DropDatabaseRequest} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DropDatabaseRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DropDatabaseRequestOrBuilder.java index 9df173723f4..f68694371a0 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DropDatabaseRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DropDatabaseRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface DropDatabaseRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionConfig.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionConfig.java index 4b7a16a4463..3115406df99 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionConfig.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionConfig.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/database/v1/common.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -40,6 +40,7 @@ private EncryptionConfig(com.google.protobuf.GeneratedMessageV3.Builder build private EncryptionConfig() { kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); } @java.lang.Override @@ -118,6 +119,126 @@ public com.google.protobuf.ByteString getKmsKeyNameBytes() { } } + public static final int KMS_KEY_NAMES_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + return kmsKeyNames_; + } + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -135,6 +256,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(kmsKeyName_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, kmsKeyName_); } + for (int i = 0; i < kmsKeyNames_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, kmsKeyNames_.getRaw(i)); + } getUnknownFields().writeTo(output); } @@ -147,6 +271,14 @@ public int getSerializedSize() { if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(kmsKeyName_)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, kmsKeyName_); } + { + int dataSize = 0; + for (int i = 0; i < kmsKeyNames_.size(); i++) { + dataSize += computeStringSizeNoTag(kmsKeyNames_.getRaw(i)); + } + size += dataSize; + size += 1 * getKmsKeyNamesList().size(); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -164,6 +296,7 @@ public boolean equals(final java.lang.Object obj) { (com.google.spanner.admin.database.v1.EncryptionConfig) obj; if (!getKmsKeyName().equals(other.getKmsKeyName())) return false; + if (!getKmsKeyNamesList().equals(other.getKmsKeyNamesList())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -177,6 +310,10 @@ public int hashCode() { hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + KMS_KEY_NAME_FIELD_NUMBER; hash = (53 * hash) + getKmsKeyName().hashCode(); + if (getKmsKeyNamesCount() > 0) { + hash = (37 * hash) + KMS_KEY_NAMES_FIELD_NUMBER; + hash = (53 * hash) + 
getKmsKeyNamesList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -318,6 +455,7 @@ public Builder clear() { super.clear(); bitField0_ = 0; kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); return this; } @@ -357,6 +495,10 @@ private void buildPartial0(com.google.spanner.admin.database.v1.EncryptionConfig if (((from_bitField0_ & 0x00000001) != 0)) { result.kmsKeyName_ = kmsKeyName_; } + if (((from_bitField0_ & 0x00000002) != 0)) { + kmsKeyNames_.makeImmutable(); + result.kmsKeyNames_ = kmsKeyNames_; + } } @java.lang.Override @@ -410,6 +552,16 @@ public Builder mergeFrom(com.google.spanner.admin.database.v1.EncryptionConfig o bitField0_ |= 0x00000001; onChanged(); } + if (!other.kmsKeyNames_.isEmpty()) { + if (kmsKeyNames_.isEmpty()) { + kmsKeyNames_ = other.kmsKeyNames_; + bitField0_ |= 0x00000002; + } else { + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.addAll(other.kmsKeyNames_); + } + onChanged(); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -442,6 +594,13 @@ public Builder mergeFrom( bitField0_ |= 0x00000001; break; } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(s); + break; + } // case 26 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -577,6 +736,306 @@ public Builder setKmsKeyNameBytes(com.google.protobuf.ByteString value) { return this; } + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureKmsKeyNamesIsMutable() { + if (!kmsKeyNames_.isModifiable()) { + kmsKeyNames_ = new com.google.protobuf.LazyStringArrayList(kmsKeyNames_); + } + bitField0_ |= 0x00000002; + } + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + kmsKeyNames_.makeImmutable(); + return kmsKeyNames_; + } + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param index The index to set the value at. + * @param value The kmsKeyNames to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyNames(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.set(index, value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param value The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNames(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param values The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addAllKmsKeyNames(java.lang.Iterable values) { + ensureKmsKeyNamesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, kmsKeyNames_); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @return This builder for chaining. + */ + public Builder clearKmsKeyNames() { + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + ; + onChanged(); + return this; + } + /** + * + * + *
    +     * Specifies the KMS configuration for the one or more keys used to encrypt
    +     * the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param value The bytes of the kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNamesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionConfigOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionConfigOrBuilder.java index 7945052b3d4..2d24ffb1393 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionConfigOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionConfigOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/common.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface EncryptionConfigOrBuilder @@ -52,4 +52,111 @@ public interface EncryptionConfigOrBuilder * @return The bytes for kmsKeyName. */ com.google.protobuf.ByteString getKmsKeyNameBytes(); + + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @return A list containing the kmsKeyNames. + */ + java.util.List getKmsKeyNamesList(); + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @return The count of kmsKeyNames. + */ + int getKmsKeyNamesCount(); + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + java.lang.String getKmsKeyNames(int index); + /** + * + * + *
    +   * Specifies the KMS configuration for the one or more keys used to encrypt
    +   * the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * repeated string kms_key_names = 3 [(.google.api.resource_reference) = { ... } + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + com.google.protobuf.ByteString getKmsKeyNamesBytes(int index); } diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionInfo.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionInfo.java index 7fcbe830322..9c3113e6ccf 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionInfo.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionInfo.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/common.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -279,9 +279,9 @@ public com.google.spanner.admin.database.v1.EncryptionInfo.Type getEncryptionTyp * * *
    -   * Output only. If present, the status of a recent encrypt/decrypt call on underlying data
    -   * for this database or backup. Regardless of status, data is always encrypted
    -   * at rest.
    +   * Output only. If present, the status of a recent encrypt/decrypt call on
    +   * underlying data for this database or backup. Regardless of status, data is
    +   * always encrypted at rest.
        * 
    * * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -297,9 +297,9 @@ public boolean hasEncryptionStatus() { * * *
    -   * Output only. If present, the status of a recent encrypt/decrypt call on underlying data
    -   * for this database or backup. Regardless of status, data is always encrypted
    -   * at rest.
    +   * Output only. If present, the status of a recent encrypt/decrypt call on
    +   * underlying data for this database or backup. Regardless of status, data is
    +   * always encrypted at rest.
        * 
    * * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -317,9 +317,9 @@ public com.google.rpc.Status getEncryptionStatus() { * * *
    -   * Output only. If present, the status of a recent encrypt/decrypt call on underlying data
    -   * for this database or backup. Regardless of status, data is always encrypted
    -   * at rest.
    +   * Output only. If present, the status of a recent encrypt/decrypt call on
    +   * underlying data for this database or backup. Regardless of status, data is
    +   * always encrypted at rest.
        * 
    * * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -340,8 +340,8 @@ public com.google.rpc.StatusOrBuilder getEncryptionStatusOrBuilder() { * * *
    -   * Output only. A Cloud KMS key version that is being used to protect the database or
    -   * backup.
    +   * Output only. A Cloud KMS key version that is being used to protect the
    +   * database or backup.
        * 
    * * @@ -366,8 +366,8 @@ public java.lang.String getKmsKeyVersion() { * * *
    -   * Output only. A Cloud KMS key version that is being used to protect the database or
    -   * backup.
    +   * Output only. A Cloud KMS key version that is being used to protect the
    +   * database or backup.
        * 
    * * @@ -911,9 +911,9 @@ public Builder clearEncryptionType() { * * *
    -     * Output only. If present, the status of a recent encrypt/decrypt call on underlying data
    -     * for this database or backup. Regardless of status, data is always encrypted
    -     * at rest.
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
          * 
    * * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -928,9 +928,9 @@ public boolean hasEncryptionStatus() { * * *
    -     * Output only. If present, the status of a recent encrypt/decrypt call on underlying data
    -     * for this database or backup. Regardless of status, data is always encrypted
    -     * at rest.
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
          * 
    * * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -951,9 +951,9 @@ public com.google.rpc.Status getEncryptionStatus() { * * *
    -     * Output only. If present, the status of a recent encrypt/decrypt call on underlying data
    -     * for this database or backup. Regardless of status, data is always encrypted
    -     * at rest.
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
          * 
    * * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -976,9 +976,9 @@ public Builder setEncryptionStatus(com.google.rpc.Status value) { * * *
    -     * Output only. If present, the status of a recent encrypt/decrypt call on underlying data
    -     * for this database or backup. Regardless of status, data is always encrypted
    -     * at rest.
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
          * 
    * * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -998,9 +998,9 @@ public Builder setEncryptionStatus(com.google.rpc.Status.Builder builderForValue * * *
    -     * Output only. If present, the status of a recent encrypt/decrypt call on underlying data
    -     * for this database or backup. Regardless of status, data is always encrypted
    -     * at rest.
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
          * 
    * * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1028,9 +1028,9 @@ public Builder mergeEncryptionStatus(com.google.rpc.Status value) { * * *
    -     * Output only. If present, the status of a recent encrypt/decrypt call on underlying data
    -     * for this database or backup. Regardless of status, data is always encrypted
    -     * at rest.
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
          * 
    * * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1050,9 +1050,9 @@ public Builder clearEncryptionStatus() { * * *
    -     * Output only. If present, the status of a recent encrypt/decrypt call on underlying data
    -     * for this database or backup. Regardless of status, data is always encrypted
    -     * at rest.
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
          * 
    * * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1067,9 +1067,9 @@ public com.google.rpc.Status.Builder getEncryptionStatusBuilder() { * * *
    -     * Output only. If present, the status of a recent encrypt/decrypt call on underlying data
    -     * for this database or backup. Regardless of status, data is always encrypted
    -     * at rest.
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
          * 
    * * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1088,9 +1088,9 @@ public com.google.rpc.StatusOrBuilder getEncryptionStatusOrBuilder() { * * *
    -     * Output only. If present, the status of a recent encrypt/decrypt call on underlying data
    -     * for this database or backup. Regardless of status, data is always encrypted
    -     * at rest.
    +     * Output only. If present, the status of a recent encrypt/decrypt call on
    +     * underlying data for this database or backup. Regardless of status, data is
    +     * always encrypted at rest.
          * 
    * * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1116,8 +1116,8 @@ public com.google.rpc.StatusOrBuilder getEncryptionStatusOrBuilder() { * * *
    -     * Output only. A Cloud KMS key version that is being used to protect the database or
    -     * backup.
    +     * Output only. A Cloud KMS key version that is being used to protect the
    +     * database or backup.
          * 
    * * @@ -1141,8 +1141,8 @@ public java.lang.String getKmsKeyVersion() { * * *
    -     * Output only. A Cloud KMS key version that is being used to protect the database or
    -     * backup.
    +     * Output only. A Cloud KMS key version that is being used to protect the
    +     * database or backup.
          * 
    * * @@ -1166,8 +1166,8 @@ public com.google.protobuf.ByteString getKmsKeyVersionBytes() { * * *
    -     * Output only. A Cloud KMS key version that is being used to protect the database or
    -     * backup.
    +     * Output only. A Cloud KMS key version that is being used to protect the
    +     * database or backup.
          * 
    * * @@ -1190,8 +1190,8 @@ public Builder setKmsKeyVersion(java.lang.String value) { * * *
    -     * Output only. A Cloud KMS key version that is being used to protect the database or
    -     * backup.
    +     * Output only. A Cloud KMS key version that is being used to protect the
    +     * database or backup.
          * 
    * * @@ -1210,8 +1210,8 @@ public Builder clearKmsKeyVersion() { * * *
    -     * Output only. A Cloud KMS key version that is being used to protect the database or
    -     * backup.
    +     * Output only. A Cloud KMS key version that is being used to protect the
    +     * database or backup.
          * 
    * * diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionInfoOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionInfoOrBuilder.java index ad0cd0444a9..bd281d5d7e0 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionInfoOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/EncryptionInfoOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/common.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface EncryptionInfoOrBuilder @@ -57,9 +57,9 @@ public interface EncryptionInfoOrBuilder * * *
    -   * Output only. If present, the status of a recent encrypt/decrypt call on underlying data
    -   * for this database or backup. Regardless of status, data is always encrypted
    -   * at rest.
    +   * Output only. If present, the status of a recent encrypt/decrypt call on
    +   * underlying data for this database or backup. Regardless of status, data is
    +   * always encrypted at rest.
        * 
    * * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -72,9 +72,9 @@ public interface EncryptionInfoOrBuilder * * *
    -   * Output only. If present, the status of a recent encrypt/decrypt call on underlying data
    -   * for this database or backup. Regardless of status, data is always encrypted
    -   * at rest.
    +   * Output only. If present, the status of a recent encrypt/decrypt call on
    +   * underlying data for this database or backup. Regardless of status, data is
    +   * always encrypted at rest.
        * 
    * * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -87,9 +87,9 @@ public interface EncryptionInfoOrBuilder * * *
    -   * Output only. If present, the status of a recent encrypt/decrypt call on underlying data
    -   * for this database or backup. Regardless of status, data is always encrypted
    -   * at rest.
    +   * Output only. If present, the status of a recent encrypt/decrypt call on
    +   * underlying data for this database or backup. Regardless of status, data is
    +   * always encrypted at rest.
        * 
    * * .google.rpc.Status encryption_status = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -101,8 +101,8 @@ public interface EncryptionInfoOrBuilder * * *
    -   * Output only. A Cloud KMS key version that is being used to protect the database or
    -   * backup.
    +   * Output only. A Cloud KMS key version that is being used to protect the
    +   * database or backup.
        * 
    * * @@ -116,8 +116,8 @@ public interface EncryptionInfoOrBuilder * * *
    -   * Output only. A Cloud KMS key version that is being used to protect the database or
    -   * backup.
    +   * Output only. A Cloud KMS key version that is being used to protect the
    +   * database or backup.
        * 
    * * diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/FullBackupSpec.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/FullBackupSpec.java new file mode 100644 index 00000000000..6c5e6b9c47a --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/FullBackupSpec.java @@ -0,0 +1,436 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The specification for full backups.
    + * A full backup stores the entire contents of the database at a given
    + * version time.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.FullBackupSpec} + */ +public final class FullBackupSpec extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.FullBackupSpec) + FullBackupSpecOrBuilder { + private static final long serialVersionUID = 0L; + // Use FullBackupSpec.newBuilder() to construct. + private FullBackupSpec(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FullBackupSpec() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FullBackupSpec(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_FullBackupSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_FullBackupSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.FullBackupSpec.class, + com.google.spanner.admin.database.v1.FullBackupSpec.Builder.class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += 
getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.FullBackupSpec)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.FullBackupSpec other = + (com.google.spanner.admin.database.v1.FullBackupSpec) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + 
+ @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.spanner.admin.database.v1.FullBackupSpec prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * The specification for full backups.
    +   * A full backup stores the entire contents of the database at a given
    +   * version time.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.FullBackupSpec} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.FullBackupSpec) + com.google.spanner.admin.database.v1.FullBackupSpecOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_FullBackupSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_FullBackupSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.FullBackupSpec.class, + com.google.spanner.admin.database.v1.FullBackupSpec.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.FullBackupSpec.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_FullBackupSpec_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpec getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpec build() { + com.google.spanner.admin.database.v1.FullBackupSpec result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpec buildPartial() { + com.google.spanner.admin.database.v1.FullBackupSpec result = + new com.google.spanner.admin.database.v1.FullBackupSpec(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.FullBackupSpec) { + return mergeFrom((com.google.spanner.admin.database.v1.FullBackupSpec) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.FullBackupSpec other) { + if (other == com.google.spanner.admin.database.v1.FullBackupSpec.getDefaultInstance()) + return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder 
mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.FullBackupSpec) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.FullBackupSpec) + private static final com.google.spanner.admin.database.v1.FullBackupSpec DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.FullBackupSpec(); + } + + public static com.google.spanner.admin.database.v1.FullBackupSpec getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FullBackupSpec parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, 
extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.FullBackupSpec getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/FullBackupSpecOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/FullBackupSpecOrBuilder.java new file mode 100644 index 00000000000..814fd9f6377 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/FullBackupSpecOrBuilder.java @@ -0,0 +1,25 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/spanner/admin/database/v1/backup.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +public interface FullBackupSpecOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.FullBackupSpec) + com.google.protobuf.MessageOrBuilder {} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupRequest.java index 754018f2c5f..595c1d41ee6 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupRequest.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The request for [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
    + * The request for
    + * [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.GetBackupRequest} @@ -286,7 +287,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The request for [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
    +   * The request for
    +   * [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.GetBackupRequest} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupRequestOrBuilder.java index 66d554f6861..329b84341fc 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface GetBackupRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupScheduleRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupScheduleRequest.java new file mode 100644 index 00000000000..f345cb2e1d5 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupScheduleRequest.java @@ -0,0 +1,660 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.GetBackupScheduleRequest} + */ +public final class GetBackupScheduleRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.GetBackupScheduleRequest) + GetBackupScheduleRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use GetBackupScheduleRequest.newBuilder() to construct. + private GetBackupScheduleRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private GetBackupScheduleRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new GetBackupScheduleRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.GetBackupScheduleRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * + * + *
    +   * Required. The name of the schedule to retrieve.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
    +   * Required. The name of the schedule to retrieve.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.GetBackupScheduleRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.GetBackupScheduleRequest other = + (com.google.spanner.admin.database.v1.GetBackupScheduleRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + 
return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + 
com.google.spanner.admin.database.v1.GetBackupScheduleRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * The request for
    +   * [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.GetBackupScheduleRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.GetBackupScheduleRequest) + com.google.spanner.admin.database.v1.GetBackupScheduleRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.GetBackupScheduleRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.GetBackupScheduleRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_GetBackupScheduleRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetBackupScheduleRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.GetBackupScheduleRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.GetBackupScheduleRequest build() { + com.google.spanner.admin.database.v1.GetBackupScheduleRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetBackupScheduleRequest buildPartial() { + com.google.spanner.admin.database.v1.GetBackupScheduleRequest result = + new com.google.spanner.admin.database.v1.GetBackupScheduleRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.GetBackupScheduleRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.GetBackupScheduleRequest) { + return 
mergeFrom((com.google.spanner.admin.database.v1.GetBackupScheduleRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.GetBackupScheduleRequest other) { + if (other + == com.google.spanner.admin.database.v1.GetBackupScheduleRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * + * + *
    +     * Required. The name of the schedule to retrieve.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * Required. The name of the schedule to retrieve.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * Required. The name of the schedule to retrieve.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The name of the schedule to retrieve.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The name of the schedule to retrieve.
    +     * Values are of the form
    +     * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.GetBackupScheduleRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.GetBackupScheduleRequest) + private static final com.google.spanner.admin.database.v1.GetBackupScheduleRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.GetBackupScheduleRequest(); + } + + public static com.google.spanner.admin.database.v1.GetBackupScheduleRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetBackupScheduleRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) 
{ + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.GetBackupScheduleRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupScheduleRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupScheduleRequestOrBuilder.java new file mode 100644 index 00000000000..855a69b57df --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetBackupScheduleRequestOrBuilder.java @@ -0,0 +1,59 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +public interface GetBackupScheduleRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.GetBackupScheduleRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The name of the schedule to retrieve.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
    +   * Required. The name of the schedule to retrieve.
    +   * Values are of the form
    +   * `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlRequest.java index 5034036df87..b871a491f66 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlRequest.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The request for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
    + * The request for
    + * [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.GetDatabaseDdlRequest} @@ -286,7 +287,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The request for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
    +   * The request for
    +   * [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.GetDatabaseDdlRequest} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlRequestOrBuilder.java index 81aaaa1b78b..63c9f230943 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface GetDatabaseDdlRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlResponse.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlResponse.java index 51e682de7a1..0581dcfa7dd 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlResponse.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlResponse.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The response for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
    + * The response for
    + * [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.GetDatabaseDdlResponse} @@ -334,7 +335,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The response for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
    +   * The response for
    +   * [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.GetDatabaseDdlResponse} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlResponseOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlResponseOrBuilder.java index 4fd5397ac64..8fd736e032d 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlResponseOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseDdlResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface GetDatabaseDdlResponseOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseRequest.java index 5c58ad99516..c7ff65e5cd9 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseRequest.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The request for [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
    + * The request for
    + * [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.GetDatabaseRequest} @@ -284,7 +285,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The request for [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
    +   * The request for
    +   * [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.GetDatabaseRequest} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseRequestOrBuilder.java index 3b1246591a0..20184567075 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/GetDatabaseRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface GetDatabaseRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpec.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpec.java new file mode 100644 index 00000000000..05db35c91a5 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpec.java @@ -0,0 +1,443 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The specification for incremental backup chains.
    + * An incremental backup stores the delta of changes between a previous
    + * backup and the database contents at a given version time. An
    + * incremental backup chain consists of a full backup and zero or more
    + * successive incremental backups. The first backup created for an
    + * incremental backup chain is always a full backup.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.IncrementalBackupSpec} + */ +public final class IncrementalBackupSpec extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.IncrementalBackupSpec) + IncrementalBackupSpecOrBuilder { + private static final long serialVersionUID = 0L; + // Use IncrementalBackupSpec.newBuilder() to construct. + private IncrementalBackupSpec(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private IncrementalBackupSpec() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new IncrementalBackupSpec(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.IncrementalBackupSpec.class, + com.google.spanner.admin.database.v1.IncrementalBackupSpec.Builder.class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = 
memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.IncrementalBackupSpec)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.IncrementalBackupSpec other = + (com.google.spanner.admin.database.v1.IncrementalBackupSpec) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.IncrementalBackupSpec prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * The specification for incremental backup chains.
    +   * An incremental backup stores the delta of changes between a previous
    +   * backup and the database contents at a given version time. An
    +   * incremental backup chain consists of a full backup and zero or more
    +   * successive incremental backups. The first backup created for an
    +   * incremental backup chain is always a full backup.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.IncrementalBackupSpec} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.IncrementalBackupSpec) + com.google.spanner.admin.database.v1.IncrementalBackupSpecOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.IncrementalBackupSpec.class, + com.google.spanner.admin.database.v1.IncrementalBackupSpec.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.IncrementalBackupSpec.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupProto + .internal_static_google_spanner_admin_database_v1_IncrementalBackupSpec_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpec getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpec build() { + 
com.google.spanner.admin.database.v1.IncrementalBackupSpec result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpec buildPartial() { + com.google.spanner.admin.database.v1.IncrementalBackupSpec result = + new com.google.spanner.admin.database.v1.IncrementalBackupSpec(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.IncrementalBackupSpec) { + return mergeFrom((com.google.spanner.admin.database.v1.IncrementalBackupSpec) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.database.v1.IncrementalBackupSpec other) { + if (other == com.google.spanner.admin.database.v1.IncrementalBackupSpec.getDefaultInstance()) + return this; + 
this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.IncrementalBackupSpec) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.IncrementalBackupSpec) + private static final com.google.spanner.admin.database.v1.IncrementalBackupSpec DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.IncrementalBackupSpec(); + } + + public static com.google.spanner.admin.database.v1.IncrementalBackupSpec getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public IncrementalBackupSpec parsePartialFrom( 
+ com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.IncrementalBackupSpec getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpecOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpecOrBuilder.java new file mode 100644 index 00000000000..081548943b6 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/IncrementalBackupSpecOrBuilder.java @@ -0,0 +1,25 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +public interface IncrementalBackupSpecOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.IncrementalBackupSpec) + com.google.protobuf.MessageOrBuilder {} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsRequest.java index bfc50f4d21f..a9b50cb29c2 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -145,7 +145,9 @@ public com.google.protobuf.ByteString getParentBytes() { * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. 
For example, the type string - * for [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] is + * for + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first if filtering on metadata @@ -163,14 +165,15 @@ public com.google.protobuf.ByteString getParentBytes() { * * `done:true` - The operation is complete. * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `metadata.database:prod` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. - * * The database the backup was taken from has a name containing the - * string "prod". + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The source database name of backup contains the string "prod". * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `(metadata.name:howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. * * The backup name contains the string "howl". * * The operation started before 2018-03-28T14:50:00Z. * * The operation resulted in an error. 
@@ -178,9 +181,9 @@ public com.google.protobuf.ByteString getParentBytes() { * `(metadata.source_backup:test) AND` \ * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. - * * The source backup of the copied backup name contains the string - * "test". + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + * * The source backup name contains the string "test". * * The operation started before 2022-01-18T14:50:00Z. * * The operation resulted in an error. * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ @@ -190,12 +193,13 @@ public com.google.protobuf.ByteString getParentBytes() { * `(metadata.source_backup:test_bkp)) AND` \ * `(error:*)` - Returns operations where: * * The operation's metadata matches either of criteria: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] AND the - * database the backup was taken from has name containing string + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * AND the source database name of the backup contains the string * "test_db" - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] AND the - * backup the backup was copied from has name containing string - * "test_bkp" + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] + * AND the source backup name contains the string "test_bkp" * * The operation resulted in an error. *
    * @@ -233,7 +237,9 @@ public java.lang.String getFilter() { * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] is + * for + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first if filtering on metadata @@ -251,14 +257,15 @@ public java.lang.String getFilter() { * * `done:true` - The operation is complete. * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `metadata.database:prod` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. - * * The database the backup was taken from has a name containing the - * string "prod". + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The source database name of backup contains the string "prod". * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `(metadata.name:howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. * * The backup name contains the string "howl". * * The operation started before 2018-03-28T14:50:00Z. * * The operation resulted in an error. 
@@ -266,9 +273,9 @@ public java.lang.String getFilter() { * `(metadata.source_backup:test) AND` \ * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. - * * The source backup of the copied backup name contains the string - * "test". + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + * * The source backup name contains the string "test". * * The operation started before 2022-01-18T14:50:00Z. * * The operation resulted in an error. * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ @@ -278,12 +285,13 @@ public java.lang.String getFilter() { * `(metadata.source_backup:test_bkp)) AND` \ * `(error:*)` - Returns operations where: * * The operation's metadata matches either of criteria: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] AND the - * database the backup was taken from has name containing string + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * AND the source database name of the backup contains the string * "test_db" - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] AND the - * backup the backup was copied from has name containing string - * "test_bkp" + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] + * AND the source backup name contains the string "test_bkp" * * The operation resulted in an error. *
    * @@ -333,8 +341,9 @@ public int getPageSize() { *
        * If non-empty, `page_token` should contain a
        * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    -   * from a previous [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] to the
    -   * same `parent` and with the same `filter`.
    +   * from a previous
    +   * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
        * 
    * * string page_token = 4; @@ -359,8 +368,9 @@ public java.lang.String getPageToken() { *
        * If non-empty, `page_token` should contain a
        * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    -   * from a previous [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] to the
    -   * same `parent` and with the same `filter`.
    +   * from a previous
    +   * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
        * 
    * * string page_token = 4; @@ -938,7 +948,9 @@ public Builder setParentBytes(com.google.protobuf.ByteString value) { * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] is + * for + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first if filtering on metadata @@ -956,14 +968,15 @@ public Builder setParentBytes(com.google.protobuf.ByteString value) { * * `done:true` - The operation is complete. * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `metadata.database:prod` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. - * * The database the backup was taken from has a name containing the - * string "prod". + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The source database name of backup contains the string "prod". * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `(metadata.name:howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. * * The backup name contains the string "howl". * * The operation started before 2018-03-28T14:50:00Z. 
* * The operation resulted in an error. @@ -971,9 +984,9 @@ public Builder setParentBytes(com.google.protobuf.ByteString value) { * `(metadata.source_backup:test) AND` \ * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. - * * The source backup of the copied backup name contains the string - * "test". + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + * * The source backup name contains the string "test". * * The operation started before 2022-01-18T14:50:00Z. * * The operation resulted in an error. * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ @@ -983,12 +996,13 @@ public Builder setParentBytes(com.google.protobuf.ByteString value) { * `(metadata.source_backup:test_bkp)) AND` \ * `(error:*)` - Returns operations where: * * The operation's metadata matches either of criteria: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] AND the - * database the backup was taken from has name containing string + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * AND the source database name of the backup contains the string * "test_db" - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] AND the - * backup the backup was copied from has name containing string - * "test_bkp" + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] + * AND the source backup name contains the string "test_bkp" * * The operation resulted in an error. *
    * @@ -1025,7 +1039,9 @@ public java.lang.String getFilter() { * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] is + * for + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first if filtering on metadata @@ -1043,14 +1059,15 @@ public java.lang.String getFilter() { * * `done:true` - The operation is complete. * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `metadata.database:prod` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. - * * The database the backup was taken from has a name containing the - * string "prod". + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The source database name of backup contains the string "prod". * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `(metadata.name:howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. * * The backup name contains the string "howl". * * The operation started before 2018-03-28T14:50:00Z. * * The operation resulted in an error. 
@@ -1058,9 +1075,9 @@ public java.lang.String getFilter() { * `(metadata.source_backup:test) AND` \ * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. - * * The source backup of the copied backup name contains the string - * "test". + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + * * The source backup name contains the string "test". * * The operation started before 2022-01-18T14:50:00Z. * * The operation resulted in an error. * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ @@ -1070,12 +1087,13 @@ public java.lang.String getFilter() { * `(metadata.source_backup:test_bkp)) AND` \ * `(error:*)` - Returns operations where: * * The operation's metadata matches either of criteria: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] AND the - * database the backup was taken from has name containing string + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * AND the source database name of the backup contains the string * "test_db" - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] AND the - * backup the backup was copied from has name containing string - * "test_bkp" + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] + * AND the source backup name contains the string "test_bkp" * * The operation resulted in an error. *
    * @@ -1112,7 +1130,9 @@ public com.google.protobuf.ByteString getFilterBytes() { * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] is + * for + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first if filtering on metadata @@ -1130,14 +1150,15 @@ public com.google.protobuf.ByteString getFilterBytes() { * * `done:true` - The operation is complete. * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `metadata.database:prod` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. - * * The database the backup was taken from has a name containing the - * string "prod". + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The source database name of backup contains the string "prod". * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `(metadata.name:howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. * * The backup name contains the string "howl". * * The operation started before 2018-03-28T14:50:00Z. * * The operation resulted in an error. 
@@ -1145,9 +1166,9 @@ public com.google.protobuf.ByteString getFilterBytes() { * `(metadata.source_backup:test) AND` \ * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. - * * The source backup of the copied backup name contains the string - * "test". + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + * * The source backup name contains the string "test". * * The operation started before 2022-01-18T14:50:00Z. * * The operation resulted in an error. * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ @@ -1157,12 +1178,13 @@ public com.google.protobuf.ByteString getFilterBytes() { * `(metadata.source_backup:test_bkp)) AND` \ * `(error:*)` - Returns operations where: * * The operation's metadata matches either of criteria: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] AND the - * database the backup was taken from has name containing string + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * AND the source database name of the backup contains the string * "test_db" - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] AND the - * backup the backup was copied from has name containing string - * "test_bkp" + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] + * AND the source backup name contains the string "test_bkp" * * The operation resulted in an error. *
    * @@ -1198,7 +1220,9 @@ public Builder setFilter(java.lang.String value) { * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] is + * for + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first if filtering on metadata @@ -1216,14 +1240,15 @@ public Builder setFilter(java.lang.String value) { * * `done:true` - The operation is complete. * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `metadata.database:prod` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. - * * The database the backup was taken from has a name containing the - * string "prod". + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The source database name of backup contains the string "prod". * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `(metadata.name:howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. * * The backup name contains the string "howl". * * The operation started before 2018-03-28T14:50:00Z. * * The operation resulted in an error. 
@@ -1231,9 +1256,9 @@ public Builder setFilter(java.lang.String value) { * `(metadata.source_backup:test) AND` \ * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. - * * The source backup of the copied backup name contains the string - * "test". + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + * * The source backup name contains the string "test". * * The operation started before 2022-01-18T14:50:00Z. * * The operation resulted in an error. * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ @@ -1243,12 +1268,13 @@ public Builder setFilter(java.lang.String value) { * `(metadata.source_backup:test_bkp)) AND` \ * `(error:*)` - Returns operations where: * * The operation's metadata matches either of criteria: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] AND the - * database the backup was taken from has name containing string + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * AND the source database name of the backup contains the string * "test_db" - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] AND the - * backup the backup was copied from has name containing string - * "test_bkp" + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] + * AND the source backup name contains the string "test_bkp" * * The operation resulted in an error. *
    * @@ -1280,7 +1306,9 @@ public Builder clearFilter() { * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] is + * for + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first if filtering on metadata @@ -1298,14 +1326,15 @@ public Builder clearFilter() { * * `done:true` - The operation is complete. * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `metadata.database:prod` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. - * * The database the backup was taken from has a name containing the - * string "prod". + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The source database name of backup contains the string "prod". * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `(metadata.name:howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. * * The backup name contains the string "howl". * * The operation started before 2018-03-28T14:50:00Z. * * The operation resulted in an error. 
@@ -1313,9 +1342,9 @@ public Builder clearFilter() { * `(metadata.source_backup:test) AND` \ * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. - * * The source backup of the copied backup name contains the string - * "test". + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + * * The source backup name contains the string "test". * * The operation started before 2022-01-18T14:50:00Z. * * The operation resulted in an error. * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ @@ -1325,12 +1354,13 @@ public Builder clearFilter() { * `(metadata.source_backup:test_bkp)) AND` \ * `(error:*)` - Returns operations where: * * The operation's metadata matches either of criteria: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] AND the - * database the backup was taken from has name containing string + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * AND the source database name of the backup contains the string * "test_db" - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] AND the - * backup the backup was copied from has name containing string - * "test_bkp" + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] + * AND the source backup name contains the string "test_bkp" * * The operation resulted in an error. *
    * @@ -1413,8 +1443,9 @@ public Builder clearPageSize() { *
          * If non-empty, `page_token` should contain a
          * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    -     * from a previous [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] to the
    -     * same `parent` and with the same `filter`.
    +     * from a previous
    +     * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
          * 
    * * string page_token = 4; @@ -1438,8 +1469,9 @@ public java.lang.String getPageToken() { *
          * If non-empty, `page_token` should contain a
          * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    -     * from a previous [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] to the
    -     * same `parent` and with the same `filter`.
    +     * from a previous
    +     * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
          * 
    * * string page_token = 4; @@ -1463,8 +1495,9 @@ public com.google.protobuf.ByteString getPageTokenBytes() { *
          * If non-empty, `page_token` should contain a
          * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    -     * from a previous [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] to the
    -     * same `parent` and with the same `filter`.
    +     * from a previous
    +     * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
          * 
    * * string page_token = 4; @@ -1487,8 +1520,9 @@ public Builder setPageToken(java.lang.String value) { *
          * If non-empty, `page_token` should contain a
          * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    -     * from a previous [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] to the
    -     * same `parent` and with the same `filter`.
    +     * from a previous
    +     * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
          * 
    * * string page_token = 4; @@ -1507,8 +1541,9 @@ public Builder clearPageToken() { *
          * If non-empty, `page_token` should contain a
          * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    -     * from a previous [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] to the
    -     * same `parent` and with the same `filter`.
    +     * from a previous
    +     * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
          * 
    * * string page_token = 4; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsRequestOrBuilder.java index b2487386273..9cc01d96cc8 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface ListBackupOperationsRequestOrBuilder @@ -73,7 +73,9 @@ public interface ListBackupOperationsRequestOrBuilder * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] is + * for + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first if filtering on metadata @@ -91,14 +93,15 @@ public interface ListBackupOperationsRequestOrBuilder * * `done:true` - The operation is complete. * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `metadata.database:prod` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. 
- * * The database the backup was taken from has a name containing the - * string "prod". + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The source database name of backup contains the string "prod". * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `(metadata.name:howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. * * The backup name contains the string "howl". * * The operation started before 2018-03-28T14:50:00Z. * * The operation resulted in an error. @@ -106,9 +109,9 @@ public interface ListBackupOperationsRequestOrBuilder * `(metadata.source_backup:test) AND` \ * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. - * * The source backup of the copied backup name contains the string - * "test". + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + * * The source backup name contains the string "test". * * The operation started before 2022-01-18T14:50:00Z. * * The operation resulted in an error. 
* * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ @@ -118,12 +121,13 @@ public interface ListBackupOperationsRequestOrBuilder * `(metadata.source_backup:test_bkp)) AND` \ * `(error:*)` - Returns operations where: * * The operation's metadata matches either of criteria: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] AND the - * database the backup was taken from has name containing string + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * AND the source database name of the backup contains the string * "test_db" - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] AND the - * backup the backup was copied from has name containing string - * "test_bkp" + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] + * AND the source backup name contains the string "test_bkp" * * The operation resulted in an error. *
    * @@ -150,7 +154,9 @@ public interface ListBackupOperationsRequestOrBuilder * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] is + * for + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first if filtering on metadata @@ -168,14 +174,15 @@ public interface ListBackupOperationsRequestOrBuilder * * `done:true` - The operation is complete. * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `metadata.database:prod` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. - * * The database the backup was taken from has a name containing the - * string "prod". + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The source database name of backup contains the string "prod". * * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ * `(metadata.name:howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. * * The backup name contains the string "howl". * * The operation started before 2018-03-28T14:50:00Z. * * The operation resulted in an error. 
@@ -183,9 +190,9 @@ public interface ListBackupOperationsRequestOrBuilder * `(metadata.source_backup:test) AND` \ * `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \ * `(error:*)` - Returns operations where: - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. - * * The source backup of the copied backup name contains the string - * "test". + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + * * The source backup name contains the string "test". * * The operation started before 2022-01-18T14:50:00Z. * * The operation resulted in an error. * * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ @@ -195,12 +202,13 @@ public interface ListBackupOperationsRequestOrBuilder * `(metadata.source_backup:test_bkp)) AND` \ * `(error:*)` - Returns operations where: * * The operation's metadata matches either of criteria: - * * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] AND the - * database the backup was taken from has name containing string + * * The operation's metadata type is + * [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + * AND the source database name of the backup contains the string * "test_db" - * * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] AND the - * backup the backup was copied from has name containing string - * "test_bkp" + * * The operation's metadata type is + * [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] + * AND the source backup name contains the string "test_bkp" * * The operation resulted in an error. *
    * @@ -230,8 +238,9 @@ public interface ListBackupOperationsRequestOrBuilder *
        * If non-empty, `page_token` should contain a
        * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    -   * from a previous [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] to the
    -   * same `parent` and with the same `filter`.
    +   * from a previous
    +   * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
        * 
    * * string page_token = 4; @@ -245,8 +254,9 @@ public interface ListBackupOperationsRequestOrBuilder *
        * If non-empty, `page_token` should contain a
        * [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
    -   * from a previous [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] to the
    -   * same `parent` and with the same `filter`.
    +   * from a previous
    +   * [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
        * 
    * * string page_token = 4; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsResponse.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsResponse.java index e47a71e4e36..8dd2402d3ac 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsResponse.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsResponse.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsResponseOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsResponseOrBuilder.java index 9a3f8544c59..a7b07c0739a 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsResponseOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupOperationsResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface ListBackupOperationsResponseOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesRequest.java new file mode 100644 index 00000000000..ee49c05d120 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesRequest.java @@ -0,0 +1,970 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupSchedulesRequest} + */ +public final class ListBackupSchedulesRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.ListBackupSchedulesRequest) + ListBackupSchedulesRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use ListBackupSchedulesRequest.newBuilder() to construct. + private ListBackupSchedulesRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ListBackupSchedulesRequest() { + parent_ = ""; + pageToken_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ListBackupSchedulesRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.class, + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object parent_ = ""; + /** + * + * + *
    +   * Required. Database is the parent resource whose backup schedules should be
    +   * listed. Values are of the form
    +   * projects/<project>/instances/<instance>/databases/<database>
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + @java.lang.Override + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + /** + * + * + *
    +   * Required. Database is the parent resource whose backup schedules should be
    +   * listed. Values are of the form
    +   * projects/<project>/instances/<instance>/databases/<database>
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + @java.lang.Override + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PAGE_SIZE_FIELD_NUMBER = 2; + private int pageSize_ = 0; + /** + * + * + *
    +   * Optional. Number of backup schedules to be returned in the response. If 0
    +   * or less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + + public static final int PAGE_TOKEN_FIELD_NUMBER = 4; + + @SuppressWarnings("serial") + private volatile java.lang.Object pageToken_ = ""; + /** + * + * + *
    +   * Optional. If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +   * to the same `parent`.
    +   * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + @java.lang.Override + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } + } + /** + * + * + *
    +   * Optional. If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +   * to the same `parent`.
    +   * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (pageSize_ != 0) { + output.writeInt32(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(pageToken_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 4, pageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(parent_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (pageSize_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, pageSize_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(pageToken_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, pageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean 
equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.ListBackupSchedulesRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest other = + (com.google.spanner.admin.database.v1.ListBackupSchedulesRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (getPageSize() != other.getPageSize()) return false; + if (!getPageToken().equals(other.getPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + hash = (37 * hash) + PAGE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + getPageSize(); + hash = (37 * hash) + PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static 
com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * The request for
    +   * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupSchedulesRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.ListBackupSchedulesRequest) + com.google.spanner.admin.database.v1.ListBackupSchedulesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.class, + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + parent_ = ""; + pageSize_ = 0; + pageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupSchedulesRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.getDefaultInstance(); + } + 
+ @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupSchedulesRequest build() { + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupSchedulesRequest buildPartial() { + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest result = + new com.google.spanner.admin.database.v1.ListBackupSchedulesRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.parent_ = parent_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.pageSize_ = pageSize_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.pageToken_ = pageToken_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, 
value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.ListBackupSchedulesRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.ListBackupSchedulesRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.ListBackupSchedulesRequest other) { + if (other + == com.google.spanner.admin.database.v1.ListBackupSchedulesRequest.getDefaultInstance()) + return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.getPageSize() != 0) { + setPageSize(other.getPageSize()); + } + if (!other.getPageToken().isEmpty()) { + pageToken_ = other.pageToken_; + bitField0_ |= 0x00000004; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + parent_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + pageSize_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + case 34: + { + pageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 34 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + /** + * + * + *
    +     * Required. Database is the parent resource whose backup schedules should be
    +     * listed. Values are of the form
    +     * projects/<project>/instances/<instance>/databases/<database>
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * Required. Database is the parent resource whose backup schedules should be
    +     * listed. Values are of the form
    +     * projects/<project>/instances/<instance>/databases/<database>
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * Required. Database is the parent resource whose backup schedules should be
    +     * listed. Values are of the form
    +     * projects/<project>/instances/<instance>/databases/<database>
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. Database is the parent resource whose backup schedules should be
    +     * listed. Values are of the form
    +     * projects/<project>/instances/<instance>/databases/<database>
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + parent_ = getDefaultInstance().getParent(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. Database is the parent resource whose backup schedules should be
    +     * listed. Values are of the form
    +     * projects/<project>/instances/<instance>/databases/<database>
    +     * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + parent_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private int pageSize_; + /** + * + * + *
    +     * Optional. Number of backup schedules to be returned in the response. If 0
    +     * or less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + @java.lang.Override + public int getPageSize() { + return pageSize_; + } + /** + * + * + *
    +     * Optional. Number of backup schedules to be returned in the response. If 0
    +     * or less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageSize to set. + * @return This builder for chaining. + */ + public Builder setPageSize(int value) { + + pageSize_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Number of backup schedules to be returned in the response. If 0
    +     * or less, defaults to the server's maximum allowed page size.
    +     * 
    + * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageSize() { + bitField0_ = (bitField0_ & ~0x00000002); + pageSize_ = 0; + onChanged(); + return this; + } + + private java.lang.Object pageToken_ = ""; + /** + * + * + *
    +     * Optional. If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +     * to the same `parent`.
    +     * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + public java.lang.String getPageToken() { + java.lang.Object ref = pageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + pageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * Optional. If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +     * to the same `parent`.
    +     * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + public com.google.protobuf.ByteString getPageTokenBytes() { + java.lang.Object ref = pageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + pageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * Optional. If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +     * to the same `parent`.
    +     * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +     * to the same `parent`.
    +     * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearPageToken() { + pageToken_ = getDefaultInstance().getPageToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. If non-empty, `page_token` should contain a
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +     * to the same `parent`.
    +     * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for pageToken to set. + * @return This builder for chaining. + */ + public Builder setPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + pageToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.ListBackupSchedulesRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupSchedulesRequest) + private static final com.google.spanner.admin.database.v1.ListBackupSchedulesRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.ListBackupSchedulesRequest(); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListBackupSchedulesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupSchedulesRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesRequestOrBuilder.java new file mode 100644 index 00000000000..814e0381050 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesRequestOrBuilder.java @@ -0,0 +1,106 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +public interface ListBackupSchedulesRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.ListBackupSchedulesRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. Database is the parent resource whose backup schedules should be
    +   * listed. Values are of the form
    +   * projects/<project>/instances/<instance>/databases/<database>
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + /** + * + * + *
    +   * Required. Database is the parent resource whose backup schedules should be
    +   * listed. Values are of the form
    +   * projects/<project>/instances/<instance>/databases/<database>
    +   * 
    + * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
    +   * Optional. Number of backup schedules to be returned in the response. If 0
    +   * or less, defaults to the server's maximum allowed page size.
    +   * 
    + * + * int32 page_size = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageSize. + */ + int getPageSize(); + + /** + * + * + *
    +   * Optional. If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +   * to the same `parent`.
    +   * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The pageToken. + */ + java.lang.String getPageToken(); + /** + * + * + *
    +   * Optional. If non-empty, `page_token` should contain a
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
    +   * to the same `parent`.
    +   * 
    + * + * string page_token = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for pageToken. + */ + com.google.protobuf.ByteString getPageTokenBytes(); +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesResponse.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesResponse.java new file mode 100644 index 00000000000..d393318524c --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesResponse.java @@ -0,0 +1,1158 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The response for
    + * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupSchedulesResponse} + */ +public final class ListBackupSchedulesResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.ListBackupSchedulesResponse) + ListBackupSchedulesResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use ListBackupSchedulesResponse.newBuilder() to construct. + private ListBackupSchedulesResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ListBackupSchedulesResponse() { + backupSchedules_ = java.util.Collections.emptyList(); + nextPageToken_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ListBackupSchedulesResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.class, + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.Builder.class); + } + + public static final int BACKUP_SCHEDULES_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private java.util.List backupSchedules_; + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + @java.lang.Override + public java.util.List + getBackupSchedulesList() { + return backupSchedules_; + } + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + @java.lang.Override + public java.util.List + getBackupSchedulesOrBuilderList() { + return backupSchedules_; + } + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + @java.lang.Override + public int getBackupSchedulesCount() { + return backupSchedules_.size(); + } + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedules(int index) { + return backupSchedules_.get(index); + } + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleOrBuilder getBackupSchedulesOrBuilder( + int index) { + return backupSchedules_.get(index); + } + + public static final int NEXT_PAGE_TOKEN_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object nextPageToken_ = ""; + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +   * call to fetch more of the schedules.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + @java.lang.Override + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } + } + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +   * call to fetch more of the schedules.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < backupSchedules_.size(); i++) { + output.writeMessage(1, backupSchedules_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(nextPageToken_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, nextPageToken_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < backupSchedules_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, backupSchedules_.get(i)); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(nextPageToken_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, nextPageToken_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.ListBackupSchedulesResponse)) { + return super.equals(obj); + } + 
com.google.spanner.admin.database.v1.ListBackupSchedulesResponse other = + (com.google.spanner.admin.database.v1.ListBackupSchedulesResponse) obj; + + if (!getBackupSchedulesList().equals(other.getBackupSchedulesList())) return false; + if (!getNextPageToken().equals(other.getNextPageToken())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getBackupSchedulesCount() > 0) { + hash = (37 * hash) + BACKUP_SCHEDULES_FIELD_NUMBER; + hash = (53 * hash) + getBackupSchedulesList().hashCode(); + } + hash = (37 * hash) + NEXT_PAGE_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getNextPageToken().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.admin.database.v1.ListBackupSchedulesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * The response for
    +   * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.ListBackupSchedulesResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.ListBackupSchedulesResponse) + com.google.spanner.admin.database.v1.ListBackupSchedulesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.class, + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (backupSchedulesBuilder_ == null) { + backupSchedules_ = java.util.Collections.emptyList(); + } else { + backupSchedules_ = null; + backupSchedulesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + nextPageToken_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_ListBackupSchedulesResponse_descriptor; + } + + @java.lang.Override + public 
com.google.spanner.admin.database.v1.ListBackupSchedulesResponse + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupSchedulesResponse build() { + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupSchedulesResponse buildPartial() { + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse result = + new com.google.spanner.admin.database.v1.ListBackupSchedulesResponse(this); + buildPartialRepeatedFields(result); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse result) { + if (backupSchedulesBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + backupSchedules_ = java.util.Collections.unmodifiableList(backupSchedules_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.backupSchedules_ = backupSchedules_; + } else { + result.backupSchedules_ = backupSchedulesBuilder_.build(); + } + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.nextPageToken_ = nextPageToken_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + 
} + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.ListBackupSchedulesResponse) { + return mergeFrom((com.google.spanner.admin.database.v1.ListBackupSchedulesResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.ListBackupSchedulesResponse other) { + if (other + == com.google.spanner.admin.database.v1.ListBackupSchedulesResponse.getDefaultInstance()) + return this; + if (backupSchedulesBuilder_ == null) { + if (!other.backupSchedules_.isEmpty()) { + if (backupSchedules_.isEmpty()) { + backupSchedules_ = other.backupSchedules_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureBackupSchedulesIsMutable(); + backupSchedules_.addAll(other.backupSchedules_); + } + onChanged(); + } + } else { + if (!other.backupSchedules_.isEmpty()) { + if (backupSchedulesBuilder_.isEmpty()) { + backupSchedulesBuilder_.dispose(); + backupSchedulesBuilder_ = null; + backupSchedules_ = other.backupSchedules_; + bitField0_ = (bitField0_ & ~0x00000001); + backupSchedulesBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getBackupSchedulesFieldBuilder() + : null; + } else { + backupSchedulesBuilder_.addAllMessages(other.backupSchedules_); + } + } + } + if (!other.getNextPageToken().isEmpty()) { + nextPageToken_ = other.nextPageToken_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.spanner.admin.database.v1.BackupSchedule m = + input.readMessage( + com.google.spanner.admin.database.v1.BackupSchedule.parser(), + extensionRegistry); + if (backupSchedulesBuilder_ == null) { + ensureBackupSchedulesIsMutable(); + backupSchedules_.add(m); + } else { + backupSchedulesBuilder_.addMessage(m); + } + break; + } // case 10 + case 18: + { + nextPageToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.util.List backupSchedules_ = + java.util.Collections.emptyList(); + + private void ensureBackupSchedulesIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + backupSchedules_ = + new java.util.ArrayList( + backupSchedules_); + bitField0_ |= 0x00000001; + } + } + + private 
com.google.protobuf.RepeatedFieldBuilderV3< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder> + backupSchedulesBuilder_; + + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public java.util.List + getBackupSchedulesList() { + if (backupSchedulesBuilder_ == null) { + return java.util.Collections.unmodifiableList(backupSchedules_); + } else { + return backupSchedulesBuilder_.getMessageList(); + } + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public int getBackupSchedulesCount() { + if (backupSchedulesBuilder_ == null) { + return backupSchedules_.size(); + } else { + return backupSchedulesBuilder_.getCount(); + } + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedules(int index) { + if (backupSchedulesBuilder_ == null) { + return backupSchedules_.get(index); + } else { + return backupSchedulesBuilder_.getMessage(index); + } + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder setBackupSchedules( + int index, com.google.spanner.admin.database.v1.BackupSchedule value) { + if (backupSchedulesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBackupSchedulesIsMutable(); + backupSchedules_.set(index, value); + onChanged(); + } else { + backupSchedulesBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder setBackupSchedules( + int index, com.google.spanner.admin.database.v1.BackupSchedule.Builder builderForValue) { + if (backupSchedulesBuilder_ == null) { + ensureBackupSchedulesIsMutable(); + backupSchedules_.set(index, builderForValue.build()); + onChanged(); + } else { + backupSchedulesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder addBackupSchedules(com.google.spanner.admin.database.v1.BackupSchedule value) { + if (backupSchedulesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBackupSchedulesIsMutable(); + backupSchedules_.add(value); + onChanged(); + } else { + backupSchedulesBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder addBackupSchedules( + int index, com.google.spanner.admin.database.v1.BackupSchedule value) { + if (backupSchedulesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBackupSchedulesIsMutable(); + backupSchedules_.add(index, value); + onChanged(); + } else { + backupSchedulesBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder addBackupSchedules( + com.google.spanner.admin.database.v1.BackupSchedule.Builder builderForValue) { + if (backupSchedulesBuilder_ == null) { + ensureBackupSchedulesIsMutable(); + backupSchedules_.add(builderForValue.build()); + onChanged(); + } else { + backupSchedulesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder addBackupSchedules( + int index, com.google.spanner.admin.database.v1.BackupSchedule.Builder builderForValue) { + if (backupSchedulesBuilder_ == null) { + ensureBackupSchedulesIsMutable(); + backupSchedules_.add(index, builderForValue.build()); + onChanged(); + } else { + backupSchedulesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder addAllBackupSchedules( + java.lang.Iterable values) { + if (backupSchedulesBuilder_ == null) { + ensureBackupSchedulesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, backupSchedules_); + onChanged(); + } else { + backupSchedulesBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder clearBackupSchedules() { + if (backupSchedulesBuilder_ == null) { + backupSchedules_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + backupSchedulesBuilder_.clear(); + } + return this; + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public Builder removeBackupSchedules(int index) { + if (backupSchedulesBuilder_ == null) { + ensureBackupSchedulesIsMutable(); + backupSchedules_.remove(index); + onChanged(); + } else { + backupSchedulesBuilder_.remove(index); + } + return this; + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public com.google.spanner.admin.database.v1.BackupSchedule.Builder getBackupSchedulesBuilder( + int index) { + return getBackupSchedulesFieldBuilder().getBuilder(index); + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public com.google.spanner.admin.database.v1.BackupScheduleOrBuilder getBackupSchedulesOrBuilder( + int index) { + if (backupSchedulesBuilder_ == null) { + return backupSchedules_.get(index); + } else { + return backupSchedulesBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public java.util.List + getBackupSchedulesOrBuilderList() { + if (backupSchedulesBuilder_ != null) { + return backupSchedulesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(backupSchedules_); + } + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public com.google.spanner.admin.database.v1.BackupSchedule.Builder addBackupSchedulesBuilder() { + return getBackupSchedulesFieldBuilder() + .addBuilder(com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance()); + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public com.google.spanner.admin.database.v1.BackupSchedule.Builder addBackupSchedulesBuilder( + int index) { + return getBackupSchedulesFieldBuilder() + .addBuilder( + index, com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance()); + } + /** + * + * + *
    +     * The list of backup schedules for a database.
    +     * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + public java.util.List + getBackupSchedulesBuilderList() { + return getBackupSchedulesFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder> + getBackupSchedulesFieldBuilder() { + if (backupSchedulesBuilder_ == null) { + backupSchedulesBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder>( + backupSchedules_, + ((bitField0_ & 0x00000001) != 0), + getParentForChildren(), + isClean()); + backupSchedules_ = null; + } + return backupSchedulesBuilder_; + } + + private java.lang.Object nextPageToken_ = ""; + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +     * call to fetch more of the schedules.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + public java.lang.String getNextPageToken() { + java.lang.Object ref = nextPageToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nextPageToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +     * call to fetch more of the schedules.
    +     * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + public com.google.protobuf.ByteString getNextPageTokenBytes() { + java.lang.Object ref = nextPageToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + nextPageToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +     * call to fetch more of the schedules.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +     * call to fetch more of the schedules.
    +     * 
    + * + * string next_page_token = 2; + * + * @return This builder for chaining. + */ + public Builder clearNextPageToken() { + nextPageToken_ = getDefaultInstance().getNextPageToken(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
    +     * `next_page_token` can be sent in a subsequent
    +     * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +     * call to fetch more of the schedules.
    +     * 
    + * + * string next_page_token = 2; + * + * @param value The bytes for nextPageToken to set. + * @return This builder for chaining. + */ + public Builder setNextPageTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nextPageToken_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.ListBackupSchedulesResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupSchedulesResponse) + private static final com.google.spanner.admin.database.v1.ListBackupSchedulesResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.ListBackupSchedulesResponse(); + } + + public static com.google.spanner.admin.database.v1.ListBackupSchedulesResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ListBackupSchedulesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.ListBackupSchedulesResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesResponseOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesResponseOrBuilder.java new file mode 100644 index 00000000000..c7866312b67 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupSchedulesResponseOrBuilder.java @@ -0,0 +1,108 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +public interface ListBackupSchedulesResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.ListBackupSchedulesResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + java.util.List getBackupSchedulesList(); + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedules(int index); + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + int getBackupSchedulesCount(); + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + java.util.List + getBackupSchedulesOrBuilderList(); + /** + * + * + *
    +   * The list of backup schedules for a database.
    +   * 
    + * + * repeated .google.spanner.admin.database.v1.BackupSchedule backup_schedules = 1; + */ + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder getBackupSchedulesOrBuilder( + int index); + + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +   * call to fetch more of the schedules.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The nextPageToken. + */ + java.lang.String getNextPageToken(); + /** + * + * + *
    +   * `next_page_token` can be sent in a subsequent
    +   * [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
    +   * call to fetch more of the schedules.
    +   * 
    + * + * string next_page_token = 2; + * + * @return The bytes for nextPageToken. + */ + com.google.protobuf.ByteString getNextPageTokenBytes(); +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsRequest.java index 247a933977c..0bb010ea335 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsRequest.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The request for [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
    + * The request for
    + * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.ListBackupsRequest} @@ -138,7 +139,9 @@ public com.google.protobuf.ByteString getParentBytes() { * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. * Colon `:` is the contains operator. Filter rules are not case sensitive. * - * The following fields in the [Backup][google.spanner.admin.database.v1.Backup] are eligible for filtering: + * The following fields in the + * [Backup][google.spanner.admin.database.v1.Backup] are eligible for + * filtering: * * * `name` * * `database` @@ -147,6 +150,7 @@ public com.google.protobuf.ByteString getParentBytes() { * * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `size_bytes` + * * `backup_schedules` * * You can combine multiple expressions by enclosing each expression in * parentheses. By default, expressions are combined with AND logic, but @@ -165,6 +169,8 @@ public com.google.protobuf.ByteString getParentBytes() { * * `expire_time < \"2018-03-28T14:50:00Z\"` * - The backup `expire_time` is before 2018-03-28T14:50:00Z. * * `size_bytes > 10000000000` - The backup's size is greater than 10GB + * * `backup_schedules:daily` + * - The backup is created from a schedule with "daily" in its name. *
    * * string filter = 2; @@ -195,7 +201,9 @@ public java.lang.String getFilter() { * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. * Colon `:` is the contains operator. Filter rules are not case sensitive. * - * The following fields in the [Backup][google.spanner.admin.database.v1.Backup] are eligible for filtering: + * The following fields in the + * [Backup][google.spanner.admin.database.v1.Backup] are eligible for + * filtering: * * * `name` * * `database` @@ -204,6 +212,7 @@ public java.lang.String getFilter() { * * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `size_bytes` + * * `backup_schedules` * * You can combine multiple expressions by enclosing each expression in * parentheses. By default, expressions are combined with AND logic, but @@ -222,6 +231,8 @@ public java.lang.String getFilter() { * * `expire_time < \"2018-03-28T14:50:00Z\"` * - The backup `expire_time` is before 2018-03-28T14:50:00Z. * * `size_bytes > 10000000000` - The backup's size is greater than 10GB + * * `backup_schedules:daily` + * - The backup is created from a schedule with "daily" in its name. *
    * * string filter = 2; @@ -269,9 +280,10 @@ public int getPageSize() { * *
        * If non-empty, `page_token` should contain a
    -   * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] from a
    -   * previous [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] to the same `parent` and with the same
    -   * `filter`.
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +   * to the same `parent` and with the same `filter`.
        * 
    * * string page_token = 4; @@ -295,9 +307,10 @@ public java.lang.String getPageToken() { * *
        * If non-empty, `page_token` should contain a
    -   * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] from a
    -   * previous [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] to the same `parent` and with the same
    -   * `filter`.
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +   * to the same `parent` and with the same `filter`.
        * 
    * * string page_token = 4; @@ -508,7 +521,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The request for [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
    +   * The request for
    +   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.ListBackupsRequest} @@ -864,7 +878,9 @@ public Builder setParentBytes(com.google.protobuf.ByteString value) { * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. * Colon `:` is the contains operator. Filter rules are not case sensitive. * - * The following fields in the [Backup][google.spanner.admin.database.v1.Backup] are eligible for filtering: + * The following fields in the + * [Backup][google.spanner.admin.database.v1.Backup] are eligible for + * filtering: * * * `name` * * `database` @@ -873,6 +889,7 @@ public Builder setParentBytes(com.google.protobuf.ByteString value) { * * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `size_bytes` + * * `backup_schedules` * * You can combine multiple expressions by enclosing each expression in * parentheses. By default, expressions are combined with AND logic, but @@ -891,6 +908,8 @@ public Builder setParentBytes(com.google.protobuf.ByteString value) { * * `expire_time < \"2018-03-28T14:50:00Z\"` * - The backup `expire_time` is before 2018-03-28T14:50:00Z. * * `size_bytes > 10000000000` - The backup's size is greater than 10GB + * * `backup_schedules:daily` + * - The backup is created from a schedule with "daily" in its name. *
    * * string filter = 2; @@ -920,7 +939,9 @@ public java.lang.String getFilter() { * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. * Colon `:` is the contains operator. Filter rules are not case sensitive. * - * The following fields in the [Backup][google.spanner.admin.database.v1.Backup] are eligible for filtering: + * The following fields in the + * [Backup][google.spanner.admin.database.v1.Backup] are eligible for + * filtering: * * * `name` * * `database` @@ -929,6 +950,7 @@ public java.lang.String getFilter() { * * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `size_bytes` + * * `backup_schedules` * * You can combine multiple expressions by enclosing each expression in * parentheses. By default, expressions are combined with AND logic, but @@ -947,6 +969,8 @@ public java.lang.String getFilter() { * * `expire_time < \"2018-03-28T14:50:00Z\"` * - The backup `expire_time` is before 2018-03-28T14:50:00Z. * * `size_bytes > 10000000000` - The backup's size is greater than 10GB + * * `backup_schedules:daily` + * - The backup is created from a schedule with "daily" in its name. *
    * * string filter = 2; @@ -976,7 +1000,9 @@ public com.google.protobuf.ByteString getFilterBytes() { * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. * Colon `:` is the contains operator. Filter rules are not case sensitive. * - * The following fields in the [Backup][google.spanner.admin.database.v1.Backup] are eligible for filtering: + * The following fields in the + * [Backup][google.spanner.admin.database.v1.Backup] are eligible for + * filtering: * * * `name` * * `database` @@ -985,6 +1011,7 @@ public com.google.protobuf.ByteString getFilterBytes() { * * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `size_bytes` + * * `backup_schedules` * * You can combine multiple expressions by enclosing each expression in * parentheses. By default, expressions are combined with AND logic, but @@ -1003,6 +1030,8 @@ public com.google.protobuf.ByteString getFilterBytes() { * * `expire_time < \"2018-03-28T14:50:00Z\"` * - The backup `expire_time` is before 2018-03-28T14:50:00Z. * * `size_bytes > 10000000000` - The backup's size is greater than 10GB + * * `backup_schedules:daily` + * - The backup is created from a schedule with "daily" in its name. *
    * * string filter = 2; @@ -1031,7 +1060,9 @@ public Builder setFilter(java.lang.String value) { * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. * Colon `:` is the contains operator. Filter rules are not case sensitive. * - * The following fields in the [Backup][google.spanner.admin.database.v1.Backup] are eligible for filtering: + * The following fields in the + * [Backup][google.spanner.admin.database.v1.Backup] are eligible for + * filtering: * * * `name` * * `database` @@ -1040,6 +1071,7 @@ public Builder setFilter(java.lang.String value) { * * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `size_bytes` + * * `backup_schedules` * * You can combine multiple expressions by enclosing each expression in * parentheses. By default, expressions are combined with AND logic, but @@ -1058,6 +1090,8 @@ public Builder setFilter(java.lang.String value) { * * `expire_time < \"2018-03-28T14:50:00Z\"` * - The backup `expire_time` is before 2018-03-28T14:50:00Z. * * `size_bytes > 10000000000` - The backup's size is greater than 10GB + * * `backup_schedules:daily` + * - The backup is created from a schedule with "daily" in its name. *
    * * string filter = 2; @@ -1082,7 +1116,9 @@ public Builder clearFilter() { * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. * Colon `:` is the contains operator. Filter rules are not case sensitive. * - * The following fields in the [Backup][google.spanner.admin.database.v1.Backup] are eligible for filtering: + * The following fields in the + * [Backup][google.spanner.admin.database.v1.Backup] are eligible for + * filtering: * * * `name` * * `database` @@ -1091,6 +1127,7 @@ public Builder clearFilter() { * * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `size_bytes` + * * `backup_schedules` * * You can combine multiple expressions by enclosing each expression in * parentheses. By default, expressions are combined with AND logic, but @@ -1109,6 +1146,8 @@ public Builder clearFilter() { * * `expire_time < \"2018-03-28T14:50:00Z\"` * - The backup `expire_time` is before 2018-03-28T14:50:00Z. * * `size_bytes > 10000000000` - The backup's size is greater than 10GB + * * `backup_schedules:daily` + * - The backup is created from a schedule with "daily" in its name. *
    * * string filter = 2; @@ -1189,9 +1228,10 @@ public Builder clearPageSize() { * *
          * If non-empty, `page_token` should contain a
    -     * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] from a
    -     * previous [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] to the same `parent` and with the same
    -     * `filter`.
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +     * to the same `parent` and with the same `filter`.
          * 
    * * string page_token = 4; @@ -1214,9 +1254,10 @@ public java.lang.String getPageToken() { * *
          * If non-empty, `page_token` should contain a
    -     * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] from a
    -     * previous [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] to the same `parent` and with the same
    -     * `filter`.
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +     * to the same `parent` and with the same `filter`.
          * 
    * * string page_token = 4; @@ -1239,9 +1280,10 @@ public com.google.protobuf.ByteString getPageTokenBytes() { * *
          * If non-empty, `page_token` should contain a
    -     * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] from a
    -     * previous [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] to the same `parent` and with the same
    -     * `filter`.
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +     * to the same `parent` and with the same `filter`.
          * 
    * * string page_token = 4; @@ -1263,9 +1305,10 @@ public Builder setPageToken(java.lang.String value) { * *
          * If non-empty, `page_token` should contain a
    -     * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] from a
    -     * previous [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] to the same `parent` and with the same
    -     * `filter`.
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +     * to the same `parent` and with the same `filter`.
          * 
    * * string page_token = 4; @@ -1283,9 +1326,10 @@ public Builder clearPageToken() { * *
          * If non-empty, `page_token` should contain a
    -     * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] from a
    -     * previous [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] to the same `parent` and with the same
    -     * `filter`.
    +     * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +     * from a previous
    +     * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +     * to the same `parent` and with the same `filter`.
          * 
    * * string page_token = 4; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsRequestOrBuilder.java index 7af3cb176db..834fcbdd9c4 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface ListBackupsRequestOrBuilder @@ -67,7 +67,9 @@ public interface ListBackupsRequestOrBuilder * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. * Colon `:` is the contains operator. Filter rules are not case sensitive. * - * The following fields in the [Backup][google.spanner.admin.database.v1.Backup] are eligible for filtering: + * The following fields in the + * [Backup][google.spanner.admin.database.v1.Backup] are eligible for + * filtering: * * * `name` * * `database` @@ -76,6 +78,7 @@ public interface ListBackupsRequestOrBuilder * * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `size_bytes` + * * `backup_schedules` * * You can combine multiple expressions by enclosing each expression in * parentheses. By default, expressions are combined with AND logic, but @@ -94,6 +97,8 @@ public interface ListBackupsRequestOrBuilder * * `expire_time < \"2018-03-28T14:50:00Z\"` * - The backup `expire_time` is before 2018-03-28T14:50:00Z. 
* * `size_bytes > 10000000000` - The backup's size is greater than 10GB + * * `backup_schedules:daily` + * - The backup is created from a schedule with "daily" in its name. *
    * * string filter = 2; @@ -113,7 +118,9 @@ public interface ListBackupsRequestOrBuilder * must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. * Colon `:` is the contains operator. Filter rules are not case sensitive. * - * The following fields in the [Backup][google.spanner.admin.database.v1.Backup] are eligible for filtering: + * The following fields in the + * [Backup][google.spanner.admin.database.v1.Backup] are eligible for + * filtering: * * * `name` * * `database` @@ -122,6 +129,7 @@ public interface ListBackupsRequestOrBuilder * * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) * * `size_bytes` + * * `backup_schedules` * * You can combine multiple expressions by enclosing each expression in * parentheses. By default, expressions are combined with AND logic, but @@ -140,6 +148,8 @@ public interface ListBackupsRequestOrBuilder * * `expire_time < \"2018-03-28T14:50:00Z\"` * - The backup `expire_time` is before 2018-03-28T14:50:00Z. * * `size_bytes > 10000000000` - The backup's size is greater than 10GB + * * `backup_schedules:daily` + * - The backup is created from a schedule with "daily" in its name. *
    * * string filter = 2; @@ -167,9 +177,10 @@ public interface ListBackupsRequestOrBuilder * *
        * If non-empty, `page_token` should contain a
    -   * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] from a
    -   * previous [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] to the same `parent` and with the same
    -   * `filter`.
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +   * to the same `parent` and with the same `filter`.
        * 
    * * string page_token = 4; @@ -182,9 +193,10 @@ public interface ListBackupsRequestOrBuilder * *
        * If non-empty, `page_token` should contain a
    -   * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] from a
    -   * previous [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] to the same `parent` and with the same
    -   * `filter`.
    +   * [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
    +   * from a previous
    +   * [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
    +   * to the same `parent` and with the same `filter`.
        * 
    * * string page_token = 4; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsResponse.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsResponse.java index acf9ccce6b7..575092a046a 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsResponse.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsResponse.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The response for [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
    + * The response for
    + * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.ListBackupsResponse} @@ -149,8 +150,8 @@ public com.google.spanner.admin.database.v1.BackupOrBuilder getBackupsOrBuilder( * *
        * `next_page_token` can be sent in a subsequent
    -   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] call to fetch more
    -   * of the matching backups.
    +   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +   * call to fetch more of the matching backups.
        * 
    * * string next_page_token = 2; @@ -174,8 +175,8 @@ public java.lang.String getNextPageToken() { * *
        * `next_page_token` can be sent in a subsequent
    -   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] call to fetch more
    -   * of the matching backups.
    +   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +   * call to fetch more of the matching backups.
        * 
    * * string next_page_token = 2; @@ -370,7 +371,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The response for [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
    +   * The response for
    +   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.ListBackupsResponse} @@ -983,8 +985,8 @@ public com.google.spanner.admin.database.v1.Backup.Builder addBackupsBuilder(int * *
          * `next_page_token` can be sent in a subsequent
    -     * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] call to fetch more
    -     * of the matching backups.
    +     * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +     * call to fetch more of the matching backups.
          * 
    * * string next_page_token = 2; @@ -1007,8 +1009,8 @@ public java.lang.String getNextPageToken() { * *
          * `next_page_token` can be sent in a subsequent
    -     * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] call to fetch more
    -     * of the matching backups.
    +     * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +     * call to fetch more of the matching backups.
          * 
    * * string next_page_token = 2; @@ -1031,8 +1033,8 @@ public com.google.protobuf.ByteString getNextPageTokenBytes() { * *
          * `next_page_token` can be sent in a subsequent
    -     * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] call to fetch more
    -     * of the matching backups.
    +     * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +     * call to fetch more of the matching backups.
          * 
    * * string next_page_token = 2; @@ -1054,8 +1056,8 @@ public Builder setNextPageToken(java.lang.String value) { * *
          * `next_page_token` can be sent in a subsequent
    -     * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] call to fetch more
    -     * of the matching backups.
    +     * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +     * call to fetch more of the matching backups.
          * 
    * * string next_page_token = 2; @@ -1073,8 +1075,8 @@ public Builder clearNextPageToken() { * *
          * `next_page_token` can be sent in a subsequent
    -     * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] call to fetch more
    -     * of the matching backups.
    +     * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +     * call to fetch more of the matching backups.
          * 
    * * string next_page_token = 2; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsResponseOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsResponseOrBuilder.java index dc7e7aa1530..89dacd534d1 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsResponseOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListBackupsResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface ListBackupsResponseOrBuilder @@ -86,8 +86,8 @@ public interface ListBackupsResponseOrBuilder * *
        * `next_page_token` can be sent in a subsequent
    -   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] call to fetch more
    -   * of the matching backups.
    +   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +   * call to fetch more of the matching backups.
        * 
    * * string next_page_token = 2; @@ -100,8 +100,8 @@ public interface ListBackupsResponseOrBuilder * *
        * `next_page_token` can be sent in a subsequent
    -   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] call to fetch more
    -   * of the matching backups.
    +   * [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
    +   * call to fetch more of the matching backups.
        * 
    * * string next_page_token = 2; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsRequest.java index 3e974d3c90b..61bd575ac45 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -145,7 +145,9 @@ public com.google.protobuf.ByteString getParentBytes() { * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] is + * for + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first, if filtering on metadata @@ -167,7 +169,8 @@ public com.google.protobuf.ByteString getParentBytes() { * `(metadata.name:restored_howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Return operations where: - * * The operation's metadata type is [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. 
+ * * The operation's metadata type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. * * The database is restored from a backup. * * The backup name contains "backup_howl". * * The restored database's name contains "restored_howl". @@ -209,7 +212,9 @@ public java.lang.String getFilter() { * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] is + * for + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first, if filtering on metadata @@ -231,7 +236,8 @@ public java.lang.String getFilter() { * `(metadata.name:restored_howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Return operations where: - * * The operation's metadata type is [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + * * The operation's metadata type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. * * The database is restored from a backup. * * The backup name contains "backup_howl". * * The restored database's name contains "restored_howl". @@ -285,8 +291,9 @@ public int getPageSize() { *
        * If non-empty, `page_token` should contain a
        * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    -   * from a previous [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] to the
    -   * same `parent` and with the same `filter`.
    +   * from a previous
    +   * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
        * 
    * * string page_token = 4; @@ -311,8 +318,9 @@ public java.lang.String getPageToken() { *
        * If non-empty, `page_token` should contain a
        * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    -   * from a previous [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] to the
    -   * same `parent` and with the same `filter`.
    +   * from a previous
    +   * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
        * 
    * * string page_token = 4; @@ -894,7 +902,9 @@ public Builder setParentBytes(com.google.protobuf.ByteString value) { * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] is + * for + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first, if filtering on metadata @@ -916,7 +926,8 @@ public Builder setParentBytes(com.google.protobuf.ByteString value) { * `(metadata.name:restored_howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Return operations where: - * * The operation's metadata type is [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + * * The operation's metadata type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. * * The database is restored from a backup. * * The backup name contains "backup_howl". * * The restored database's name contains "restored_howl". @@ -957,7 +968,9 @@ public java.lang.String getFilter() { * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] is + * for + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. * * `metadata.<field_name>` - any field in metadata.value. 
* `metadata.@type` must be specified first, if filtering on metadata @@ -979,7 +992,8 @@ public java.lang.String getFilter() { * `(metadata.name:restored_howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Return operations where: - * * The operation's metadata type is [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + * * The operation's metadata type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. * * The database is restored from a backup. * * The backup name contains "backup_howl". * * The restored database's name contains "restored_howl". @@ -1020,7 +1034,9 @@ public com.google.protobuf.ByteString getFilterBytes() { * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] is + * for + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first, if filtering on metadata @@ -1042,7 +1058,8 @@ public com.google.protobuf.ByteString getFilterBytes() { * `(metadata.name:restored_howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Return operations where: - * * The operation's metadata type is [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + * * The operation's metadata type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. * * The database is restored from a backup. * * The backup name contains "backup_howl". * * The restored database's name contains "restored_howl". 
@@ -1082,7 +1099,9 @@ public Builder setFilter(java.lang.String value) { * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] is + * for + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first, if filtering on metadata @@ -1104,7 +1123,8 @@ public Builder setFilter(java.lang.String value) { * `(metadata.name:restored_howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Return operations where: - * * The operation's metadata type is [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + * * The operation's metadata type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. * * The database is restored from a backup. * * The backup name contains "backup_howl". * * The restored database's name contains "restored_howl". @@ -1140,7 +1160,9 @@ public Builder clearFilter() { * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] is + * for + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. * * `metadata.<field_name>` - any field in metadata.value. 
* `metadata.@type` must be specified first, if filtering on metadata @@ -1162,7 +1184,8 @@ public Builder clearFilter() { * `(metadata.name:restored_howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Return operations where: - * * The operation's metadata type is [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + * * The operation's metadata type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. * * The database is restored from a backup. * * The backup name contains "backup_howl". * * The restored database's name contains "restored_howl". @@ -1249,8 +1272,9 @@ public Builder clearPageSize() { *
          * If non-empty, `page_token` should contain a
          * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    -     * from a previous [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] to the
    -     * same `parent` and with the same `filter`.
    +     * from a previous
    +     * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
          * 
    * * string page_token = 4; @@ -1274,8 +1298,9 @@ public java.lang.String getPageToken() { *
          * If non-empty, `page_token` should contain a
          * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    -     * from a previous [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] to the
    -     * same `parent` and with the same `filter`.
    +     * from a previous
    +     * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
          * 
    * * string page_token = 4; @@ -1299,8 +1324,9 @@ public com.google.protobuf.ByteString getPageTokenBytes() { *
          * If non-empty, `page_token` should contain a
          * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    -     * from a previous [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] to the
    -     * same `parent` and with the same `filter`.
    +     * from a previous
    +     * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
          * 
    * * string page_token = 4; @@ -1323,8 +1349,9 @@ public Builder setPageToken(java.lang.String value) { *
          * If non-empty, `page_token` should contain a
          * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    -     * from a previous [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] to the
    -     * same `parent` and with the same `filter`.
    +     * from a previous
    +     * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
          * 
    * * string page_token = 4; @@ -1343,8 +1370,9 @@ public Builder clearPageToken() { *
          * If non-empty, `page_token` should contain a
          * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    -     * from a previous [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] to the
    -     * same `parent` and with the same `filter`.
    +     * from a previous
    +     * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +     * to the same `parent` and with the same `filter`.
          * 
    * * string page_token = 4; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsRequestOrBuilder.java index 26c9fdad785..8b545cdf491 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface ListDatabaseOperationsRequestOrBuilder @@ -73,7 +73,9 @@ public interface ListDatabaseOperationsRequestOrBuilder * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] is + * for + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first, if filtering on metadata @@ -95,7 +97,8 @@ public interface ListDatabaseOperationsRequestOrBuilder * `(metadata.name:restored_howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Return operations where: - * * The operation's metadata type is [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. 
+ * * The operation's metadata type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. * * The database is restored from a backup. * * The backup name contains "backup_howl". * * The restored database's name contains "restored_howl". @@ -126,7 +129,9 @@ public interface ListDatabaseOperationsRequestOrBuilder * * `name` - The name of the long-running operation * * `done` - False if the operation is in progress, else true. * * `metadata.@type` - the type of metadata. For example, the type string - * for [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] is + * for + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] + * is * `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. * * `metadata.<field_name>` - any field in metadata.value. * `metadata.@type` must be specified first, if filtering on metadata @@ -148,7 +153,8 @@ public interface ListDatabaseOperationsRequestOrBuilder * `(metadata.name:restored_howl) AND` \ * `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ * `(error:*)` - Return operations where: - * * The operation's metadata type is [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + * * The operation's metadata type is + * [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. * * The database is restored from a backup. * * The backup name contains "backup_howl". * * The restored database's name contains "restored_howl". @@ -182,8 +188,9 @@ public interface ListDatabaseOperationsRequestOrBuilder *
        * If non-empty, `page_token` should contain a
        * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    -   * from a previous [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] to the
    -   * same `parent` and with the same `filter`.
    +   * from a previous
    +   * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
        * 
    * * string page_token = 4; @@ -197,8 +204,9 @@ public interface ListDatabaseOperationsRequestOrBuilder *
        * If non-empty, `page_token` should contain a
        * [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
    -   * from a previous [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] to the
    -   * same `parent` and with the same `filter`.
    +   * from a previous
    +   * [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
    +   * to the same `parent` and with the same `filter`.
        * 
    * * string page_token = 4; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsResponse.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsResponse.java index 6142a9887cd..ec69a81d1f0 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsResponse.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsResponse.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsResponseOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsResponseOrBuilder.java index 13215e0424a..6c8001fbbf4 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsResponseOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseOperationsResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface ListDatabaseOperationsResponseOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesRequest.java index b1d185e6521..6caecc9fdc6 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesRequest.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The request for [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
    + * The request for
    + * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.ListDatabaseRolesRequest} @@ -74,7 +75,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { *
        * Required. The database whose roles should be listed.
        * Values are of the form
    -   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles`.
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
        * 
    * * @@ -101,7 +102,7 @@ public java.lang.String getParent() { *
        * Required. The database whose roles should be listed.
        * Values are of the form
    -   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles`.
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
        * 
    * * @@ -151,8 +152,9 @@ public int getPageSize() { * *
        * If non-empty, `page_token` should contain a
    -   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] from a
    -   * previous [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
        * 
    * * string page_token = 3; @@ -176,8 +178,9 @@ public java.lang.String getPageToken() { * *
        * If non-empty, `page_token` should contain a
    -   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] from a
    -   * previous [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
        * 
    * * string page_token = 3; @@ -379,7 +382,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The request for [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
    +   * The request for
    +   * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.ListDatabaseRolesRequest} @@ -596,7 +600,7 @@ public Builder mergeFrom( *
          * Required. The database whose roles should be listed.
          * Values are of the form
    -     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles`.
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
          * 
    * * @@ -622,7 +626,7 @@ public java.lang.String getParent() { *
          * Required. The database whose roles should be listed.
          * Values are of the form
    -     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles`.
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
          * 
    * * @@ -648,7 +652,7 @@ public com.google.protobuf.ByteString getParentBytes() { *
          * Required. The database whose roles should be listed.
          * Values are of the form
    -     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles`.
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
          * 
    * * @@ -673,7 +677,7 @@ public Builder setParent(java.lang.String value) { *
          * Required. The database whose roles should be listed.
          * Values are of the form
    -     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles`.
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
          * 
    * * @@ -694,7 +698,7 @@ public Builder clearParent() { *
          * Required. The database whose roles should be listed.
          * Values are of the form
    -     * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles`.
    +     * `projects/<project>/instances/<instance>/databases/<database>`.
          * 
    * * @@ -777,8 +781,9 @@ public Builder clearPageSize() { * *
          * If non-empty, `page_token` should contain a
    -     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] from a
    -     * previous [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
          * 
    * * string page_token = 3; @@ -801,8 +806,9 @@ public java.lang.String getPageToken() { * *
          * If non-empty, `page_token` should contain a
    -     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] from a
    -     * previous [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
          * 
    * * string page_token = 3; @@ -825,8 +831,9 @@ public com.google.protobuf.ByteString getPageTokenBytes() { * *
          * If non-empty, `page_token` should contain a
    -     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] from a
    -     * previous [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
          * 
    * * string page_token = 3; @@ -848,8 +855,9 @@ public Builder setPageToken(java.lang.String value) { * *
          * If non-empty, `page_token` should contain a
    -     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] from a
    -     * previous [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
          * 
    * * string page_token = 3; @@ -867,8 +875,9 @@ public Builder clearPageToken() { * *
          * If non-empty, `page_token` should contain a
    -     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] from a
    -     * previous [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
          * 
    * * string page_token = 3; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesRequestOrBuilder.java index 9ee520170e8..45bbe33472d 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface ListDatabaseRolesRequestOrBuilder @@ -30,7 +30,7 @@ public interface ListDatabaseRolesRequestOrBuilder *
        * Required. The database whose roles should be listed.
        * Values are of the form
    -   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles`.
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
        * 
    * * @@ -46,7 +46,7 @@ public interface ListDatabaseRolesRequestOrBuilder *
        * Required. The database whose roles should be listed.
        * Values are of the form
    -   * `projects/<project>/instances/<instance>/databases/<database>/databaseRoles`.
    +   * `projects/<project>/instances/<instance>/databases/<database>`.
        * 
    * * @@ -76,8 +76,9 @@ public interface ListDatabaseRolesRequestOrBuilder * *
        * If non-empty, `page_token` should contain a
    -   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] from a
    -   * previous [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
        * 
    * * string page_token = 3; @@ -90,8 +91,9 @@ public interface ListDatabaseRolesRequestOrBuilder * *
        * If non-empty, `page_token` should contain a
    -   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] from a
    -   * previous [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
        * 
    * * string page_token = 3; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesResponse.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesResponse.java index ad74092d04c..260c110d226 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesResponse.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesResponse.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The response for [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
    + * The response for
    + * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.ListDatabaseRolesResponse} @@ -366,7 +367,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The response for [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
    +   * The response for
    +   * [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.ListDatabaseRolesResponse} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesResponseOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesResponseOrBuilder.java index 29b2dc08e83..ffedad7d5a2 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesResponseOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabaseRolesResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface ListDatabaseRolesResponseOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesRequest.java index ae7da65a373..fa4da40dace 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesRequest.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The request for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
    + * The request for
    + * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.ListDatabasesRequest} @@ -149,8 +150,9 @@ public int getPageSize() { * *
        * If non-empty, `page_token` should contain a
    -   * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a
    -   * previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
        * 
    * * string page_token = 4; @@ -174,8 +176,9 @@ public java.lang.String getPageToken() { * *
        * If non-empty, `page_token` should contain a
    -   * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a
    -   * previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
        * 
    * * string page_token = 4; @@ -377,7 +380,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The request for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
    +   * The request for
    +   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.ListDatabasesRequest} @@ -767,8 +771,9 @@ public Builder clearPageSize() { * *
          * If non-empty, `page_token` should contain a
    -     * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a
    -     * previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
          * 
    * * string page_token = 4; @@ -791,8 +796,9 @@ public java.lang.String getPageToken() { * *
          * If non-empty, `page_token` should contain a
    -     * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a
    -     * previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
          * 
    * * string page_token = 4; @@ -815,8 +821,9 @@ public com.google.protobuf.ByteString getPageTokenBytes() { * *
          * If non-empty, `page_token` should contain a
    -     * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a
    -     * previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
          * 
    * * string page_token = 4; @@ -838,8 +845,9 @@ public Builder setPageToken(java.lang.String value) { * *
          * If non-empty, `page_token` should contain a
    -     * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a
    -     * previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
          * 
    * * string page_token = 4; @@ -857,8 +865,9 @@ public Builder clearPageToken() { * *
          * If non-empty, `page_token` should contain a
    -     * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a
    -     * previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +     * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +     * from a previous
    +     * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
          * 
    * * string page_token = 4; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesRequestOrBuilder.java index 0551fd29182..dab91ea8efb 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface ListDatabasesRequestOrBuilder @@ -74,8 +74,9 @@ public interface ListDatabasesRequestOrBuilder * *
        * If non-empty, `page_token` should contain a
    -   * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a
    -   * previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
        * 
    * * string page_token = 4; @@ -88,8 +89,9 @@ public interface ListDatabasesRequestOrBuilder * *
        * If non-empty, `page_token` should contain a
    -   * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a
    -   * previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
    +   * [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
    +   * from a previous
    +   * [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
        * 
    * * string page_token = 4; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesResponse.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesResponse.java index bb5cb1c2ffc..a50a16577a5 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesResponse.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesResponse.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The response for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
    + * The response for
    + * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.ListDatabasesResponse} @@ -144,8 +145,8 @@ public com.google.spanner.admin.database.v1.DatabaseOrBuilder getDatabasesOrBuil * *
        * `next_page_token` can be sent in a subsequent
    -   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more
    -   * of the matching databases.
    +   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +   * call to fetch more of the matching databases.
        * 
    * * string next_page_token = 2; @@ -169,8 +170,8 @@ public java.lang.String getNextPageToken() { * *
        * `next_page_token` can be sent in a subsequent
    -   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more
    -   * of the matching databases.
    +   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +   * call to fetch more of the matching databases.
        * 
    * * string next_page_token = 2; @@ -365,7 +366,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The response for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
    +   * The response for
    +   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.ListDatabasesResponse} @@ -962,8 +964,8 @@ public com.google.spanner.admin.database.v1.Database.Builder addDatabasesBuilder * *
          * `next_page_token` can be sent in a subsequent
    -     * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more
    -     * of the matching databases.
    +     * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +     * call to fetch more of the matching databases.
          * 
    * * string next_page_token = 2; @@ -986,8 +988,8 @@ public java.lang.String getNextPageToken() { * *
          * `next_page_token` can be sent in a subsequent
    -     * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more
    -     * of the matching databases.
    +     * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +     * call to fetch more of the matching databases.
          * 
    * * string next_page_token = 2; @@ -1010,8 +1012,8 @@ public com.google.protobuf.ByteString getNextPageTokenBytes() { * *
          * `next_page_token` can be sent in a subsequent
    -     * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more
    -     * of the matching databases.
    +     * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +     * call to fetch more of the matching databases.
          * 
    * * string next_page_token = 2; @@ -1033,8 +1035,8 @@ public Builder setNextPageToken(java.lang.String value) { * *
          * `next_page_token` can be sent in a subsequent
    -     * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more
    -     * of the matching databases.
    +     * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +     * call to fetch more of the matching databases.
          * 
    * * string next_page_token = 2; @@ -1052,8 +1054,8 @@ public Builder clearNextPageToken() { * *
          * `next_page_token` can be sent in a subsequent
    -     * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more
    -     * of the matching databases.
    +     * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +     * call to fetch more of the matching databases.
          * 
    * * string next_page_token = 2; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesResponseOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesResponseOrBuilder.java index 41dcbb25569..8ca41b1fd4b 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesResponseOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/ListDatabasesResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface ListDatabasesResponseOrBuilder @@ -81,8 +81,8 @@ public interface ListDatabasesResponseOrBuilder * *
        * `next_page_token` can be sent in a subsequent
    -   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more
    -   * of the matching databases.
    +   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +   * call to fetch more of the matching databases.
        * 
    * * string next_page_token = 2; @@ -95,8 +95,8 @@ public interface ListDatabasesResponseOrBuilder * *
        * `next_page_token` can be sent in a subsequent
    -   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more
    -   * of the matching databases.
    +   * [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
    +   * call to fetch more of the matching databases.
        * 
    * * string next_page_token = 2; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OperationProgress.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OperationProgress.java index 907ec5d3b86..66fe94c3834 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OperationProgress.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OperationProgress.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/common.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OperationProgressOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OperationProgressOrBuilder.java index c6a5146db88..e3a4979cb15 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OperationProgressOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OperationProgressOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/database/v1/common.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface OperationProgressOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OptimizeRestoredDatabaseMetadata.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OptimizeRestoredDatabaseMetadata.java index 283ffd4fedf..d65533ce01c 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OptimizeRestoredDatabaseMetadata.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OptimizeRestoredDatabaseMetadata.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OptimizeRestoredDatabaseMetadataOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OptimizeRestoredDatabaseMetadataOrBuilder.java index 0587fd0d134..009ea179158 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OptimizeRestoredDatabaseMetadataOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/OptimizeRestoredDatabaseMetadataOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface OptimizeRestoredDatabaseMetadataOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseEncryptionConfig.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseEncryptionConfig.java index 296a12845dc..f0c457d994f 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseEncryptionConfig.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseEncryptionConfig.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -42,6 +42,7 @@ private RestoreDatabaseEncryptionConfig( private RestoreDatabaseEncryptionConfig() { encryptionType_ = 0; kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); } @java.lang.Override @@ -91,7 +92,8 @@ public enum EncryptionType implements com.google.protobuf.ProtocolMessageEnum { * *
          * This is the default option when
    -     * [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig] is not specified.
    +     * [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig]
    +     * is not specified.
          * 
    * * USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1; @@ -136,7 +138,8 @@ public enum EncryptionType implements com.google.protobuf.ProtocolMessageEnum { * *
          * This is the default option when
    -     * [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig] is not specified.
    +     * [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig]
    +     * is not specified.
          * 
    * * USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1; @@ -303,10 +306,10 @@ public int getEncryptionTypeValue() { * * *
    -   * Optional. The Cloud KMS key that will be used to encrypt/decrypt the restored
    -   * database. This field should be set only when
    -   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] is
    -   * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +   * restored database. This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
        * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
        * 
    * @@ -332,10 +335,10 @@ public java.lang.String getKmsKeyName() { * * *
    -   * Optional. The Cloud KMS key that will be used to encrypt/decrypt the restored
    -   * database. This field should be set only when
    -   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] is
    -   * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +   * restored database. This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
        * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
        * 
    * @@ -358,6 +361,134 @@ public com.google.protobuf.ByteString getKmsKeyNameBytes() { } } + public static final int KMS_KEY_NAMES_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + return kmsKeyNames_; + } + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -381,6 +512,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(kmsKeyName_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, kmsKeyName_); } + for (int i = 0; i < kmsKeyNames_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, kmsKeyNames_.getRaw(i)); + } getUnknownFields().writeTo(output); } @@ -399,6 +533,14 @@ public int getSerializedSize() { if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(kmsKeyName_)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, kmsKeyName_); } + { + int dataSize = 0; + for (int i = 0; i < kmsKeyNames_.size(); i++) { + dataSize += computeStringSizeNoTag(kmsKeyNames_.getRaw(i)); + } + size += dataSize; + size += 1 * getKmsKeyNamesList().size(); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -417,6 +559,7 @@ public boolean equals(final java.lang.Object obj) { if (encryptionType_ != other.encryptionType_) return false; if (!getKmsKeyName().equals(other.getKmsKeyName())) return false; + if (!getKmsKeyNamesList().equals(other.getKmsKeyNamesList())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -432,6 +575,10 @@ public int hashCode() { hash = (53 * hash) + encryptionType_; hash = (37 * hash) + KMS_KEY_NAME_FIELD_NUMBER; hash = (53 * hash) + getKmsKeyName().hashCode(); + if (getKmsKeyNamesCount() > 0) { + hash = (37 * hash) + 
KMS_KEY_NAMES_FIELD_NUMBER; + hash = (53 * hash) + getKmsKeyNamesList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -576,6 +723,7 @@ public Builder clear() { bitField0_ = 0; encryptionType_ = 0; kmsKeyName_ = ""; + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); return this; } @@ -621,6 +769,10 @@ private void buildPartial0( if (((from_bitField0_ & 0x00000002) != 0)) { result.kmsKeyName_ = kmsKeyName_; } + if (((from_bitField0_ & 0x00000004) != 0)) { + kmsKeyNames_.makeImmutable(); + result.kmsKeyNames_ = kmsKeyNames_; + } } @java.lang.Override @@ -680,6 +832,16 @@ public Builder mergeFrom( bitField0_ |= 0x00000002; onChanged(); } + if (!other.kmsKeyNames_.isEmpty()) { + if (kmsKeyNames_.isEmpty()) { + kmsKeyNames_ = other.kmsKeyNames_; + bitField0_ |= 0x00000004; + } else { + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.addAll(other.kmsKeyNames_); + } + onChanged(); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -718,6 +880,13 @@ public Builder mergeFrom( bitField0_ |= 0x00000002; break; } // case 18 + case 26: + { + java.lang.String s = input.readStringRequireUtf8(); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(s); + break; + } // case 26 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -848,10 +1017,10 @@ public Builder clearEncryptionType() { * * *
    -     * Optional. The Cloud KMS key that will be used to encrypt/decrypt the restored
    -     * database. This field should be set only when
    -     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] is
    -     * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +     * restored database. This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
          * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
          * 
    * @@ -876,10 +1045,10 @@ public java.lang.String getKmsKeyName() { * * *
    -     * Optional. The Cloud KMS key that will be used to encrypt/decrypt the restored
    -     * database. This field should be set only when
    -     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] is
    -     * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +     * restored database. This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
          * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
          * 
    * @@ -904,10 +1073,10 @@ public com.google.protobuf.ByteString getKmsKeyNameBytes() { * * *
    -     * Optional. The Cloud KMS key that will be used to encrypt/decrypt the restored
    -     * database. This field should be set only when
    -     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] is
    -     * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +     * restored database. This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
          * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
          * 
    * @@ -931,10 +1100,10 @@ public Builder setKmsKeyName(java.lang.String value) { * * *
    -     * Optional. The Cloud KMS key that will be used to encrypt/decrypt the restored
    -     * database. This field should be set only when
    -     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] is
    -     * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +     * restored database. This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
          * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
          * 
    * @@ -954,10 +1123,10 @@ public Builder clearKmsKeyName() { * * *
    -     * Optional. The Cloud KMS key that will be used to encrypt/decrypt the restored
    -     * database. This field should be set only when
    -     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] is
    -     * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +     * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +     * restored database. This field should be set only when
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
          * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
          * 
    * @@ -979,6 +1148,324 @@ public Builder setKmsKeyNameBytes(com.google.protobuf.ByteString value) { return this; } + private com.google.protobuf.LazyStringArrayList kmsKeyNames_ = + com.google.protobuf.LazyStringArrayList.emptyList(); + + private void ensureKmsKeyNamesIsMutable() { + if (!kmsKeyNames_.isModifiable()) { + kmsKeyNames_ = new com.google.protobuf.LazyStringArrayList(kmsKeyNames_); + } + bitField0_ |= 0x00000004; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + public com.google.protobuf.ProtocolStringList getKmsKeyNamesList() { + kmsKeyNames_.makeImmutable(); + return kmsKeyNames_; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + public int getKmsKeyNamesCount() { + return kmsKeyNames_.size(); + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + public java.lang.String getKmsKeyNames(int index) { + return kmsKeyNames_.get(index); + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + public com.google.protobuf.ByteString getKmsKeyNamesBytes(int index) { + return kmsKeyNames_.getByteString(index); + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index to set the value at. + * @param value The kmsKeyNames to set. + * @return This builder for chaining. + */ + public Builder setKmsKeyNames(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.set(index, value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNames(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param values The kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addAllKmsKeyNames(java.lang.Iterable values) { + ensureKmsKeyNamesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, kmsKeyNames_); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearKmsKeyNames() { + kmsKeyNames_ = com.google.protobuf.LazyStringArrayList.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + ; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Specifies the KMS configuration for the one or more keys used to
    +     * encrypt the database. Values are of the form
    +     * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +     *
    +     * The keys referenced by kms_key_names must fully cover all
    +     * regions of the database instance configuration. Some examples:
    +     * * For single region database instance configs, specify a single regional
    +     * location KMS key.
    +     * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +     * either specify a multi-regional location KMS key or multiple regional
    +     * location KMS keys that cover all regions in the instance config.
    +     * * For a database instance config of type USER_MANAGED, please specify only
    +     * regional location KMS keys to cover each region in the instance config.
    +     * Multi-regional location KMS keys are not supported for USER_MANAGED
    +     * instance configs.
    +     * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes of the kmsKeyNames to add. + * @return This builder for chaining. + */ + public Builder addKmsKeyNamesBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureKmsKeyNamesIsMutable(); + kmsKeyNames_.add(value); + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseEncryptionConfigOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseEncryptionConfigOrBuilder.java index 04d44766c16..c7d3e06e9d5 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseEncryptionConfigOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseEncryptionConfigOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface RestoreDatabaseEncryptionConfigOrBuilder @@ -58,10 +58,10 @@ public interface RestoreDatabaseEncryptionConfigOrBuilder * * *
    -   * Optional. The Cloud KMS key that will be used to encrypt/decrypt the restored
    -   * database. This field should be set only when
    -   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] is
    -   * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +   * restored database. This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
        * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
        * 
    * @@ -76,10 +76,10 @@ public interface RestoreDatabaseEncryptionConfigOrBuilder * * *
    -   * Optional. The Cloud KMS key that will be used to encrypt/decrypt the restored
    -   * database. This field should be set only when
    -   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] is
    -   * `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
    +   * Optional. The Cloud KMS key that will be used to encrypt/decrypt the
    +   * restored database. This field should be set only when
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
        * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
        * 
    * @@ -90,4 +90,119 @@ public interface RestoreDatabaseEncryptionConfigOrBuilder * @return The bytes for kmsKeyName. */ com.google.protobuf.ByteString getKmsKeyNameBytes(); + + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return A list containing the kmsKeyNames. + */ + java.util.List getKmsKeyNamesList(); + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @return The count of kmsKeyNames. + */ + int getKmsKeyNamesCount(); + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the element to return. + * @return The kmsKeyNames at the given index. + */ + java.lang.String getKmsKeyNames(int index); + /** + * + * + *
    +   * Optional. Specifies the KMS configuration for the one or more keys used to
    +   * encrypt the database. Values are of the form
    +   * `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
    +   *
    +   * The keys referenced by kms_key_names must fully cover all
    +   * regions of the database instance configuration. Some examples:
    +   * * For single region database instance configs, specify a single regional
    +   * location KMS key.
    +   * * For multi-regional database instance configs of type GOOGLE_MANAGED,
    +   * either specify a multi-regional location KMS key or multiple regional
    +   * location KMS keys that cover all regions in the instance config.
    +   * * For a database instance config of type USER_MANAGED, please specify only
    +   * regional location KMS keys to cover each region in the instance config.
    +   * Multi-regional location KMS keys are not supported for USER_MANAGED
    +   * instance configs.
    +   * 
    + * + * + * repeated string kms_key_names = 3 [(.google.api.field_behavior) = OPTIONAL, (.google.api.resource_reference) = { ... } + * + * + * @param index The index of the value to return. + * @return The bytes of the kmsKeyNames at the given index. + */ + com.google.protobuf.ByteString getKmsKeyNamesBytes(int index); } diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseMetadata.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseMetadata.java index 68896869e46..bfdb45d5c31 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseMetadata.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseMetadata.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -324,7 +324,8 @@ public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgre * operation completed despite cancellation. On successful cancellation, * the operation is not deleted; instead, it becomes an operation with * an [Operation.error][google.longrunning.Operation.error] value with a - * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. *
    * * .google.protobuf.Timestamp cancel_time = 5; @@ -349,7 +350,8 @@ public boolean hasCancelTime() { * operation completed despite cancellation. On successful cancellation, * the operation is not deleted; instead, it becomes an operation with * an [Operation.error][google.longrunning.Operation.error] value with a - * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. *
    * * .google.protobuf.Timestamp cancel_time = 5; @@ -374,7 +376,8 @@ public com.google.protobuf.Timestamp getCancelTime() { * operation completed despite cancellation. On successful cancellation, * the operation is not deleted; instead, it becomes an operation with * an [Operation.error][google.longrunning.Operation.error] value with a - * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. *
    * * .google.protobuf.Timestamp cancel_time = 5; @@ -399,10 +402,10 @@ public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>` * where the <database> is the name of database being created and restored to. * The metadata type of the long-running operation is - * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. This long-running operation will be - * automatically created by the system after the RestoreDatabase long-running - * operation completes successfully. This operation will not be created if the - * restore was not successful. + * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. + * This long-running operation will be automatically created by the system + * after the RestoreDatabase long-running operation completes successfully. + * This operation will not be created if the restore was not successful. *
    * * string optimize_database_operation_name = 6; @@ -432,10 +435,10 @@ public java.lang.String getOptimizeDatabaseOperationName() { * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>` * where the <database> is the name of database being created and restored to. * The metadata type of the long-running operation is - * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. This long-running operation will be - * automatically created by the system after the RestoreDatabase long-running - * operation completes successfully. This operation will not be created if the - * restore was not successful. + * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. + * This long-running operation will be automatically created by the system + * after the RestoreDatabase long-running operation completes successfully. + * This operation will not be created if the restore was not successful. *
    * * string optimize_database_operation_name = 6; @@ -1632,7 +1635,8 @@ public com.google.spanner.admin.database.v1.OperationProgressOrBuilder getProgre * operation completed despite cancellation. On successful cancellation, * the operation is not deleted; instead, it becomes an operation with * an [Operation.error][google.longrunning.Operation.error] value with a - * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. *
    * * .google.protobuf.Timestamp cancel_time = 5; @@ -1656,7 +1660,8 @@ public boolean hasCancelTime() { * operation completed despite cancellation. On successful cancellation, * the operation is not deleted; instead, it becomes an operation with * an [Operation.error][google.longrunning.Operation.error] value with a - * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. *
    * * .google.protobuf.Timestamp cancel_time = 5; @@ -1686,7 +1691,8 @@ public com.google.protobuf.Timestamp getCancelTime() { * operation completed despite cancellation. On successful cancellation, * the operation is not deleted; instead, it becomes an operation with * an [Operation.error][google.longrunning.Operation.error] value with a - * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. *
    * * .google.protobuf.Timestamp cancel_time = 5; @@ -1718,7 +1724,8 @@ public Builder setCancelTime(com.google.protobuf.Timestamp value) { * operation completed despite cancellation. On successful cancellation, * the operation is not deleted; instead, it becomes an operation with * an [Operation.error][google.longrunning.Operation.error] value with a - * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. *
    * * .google.protobuf.Timestamp cancel_time = 5; @@ -1747,7 +1754,8 @@ public Builder setCancelTime(com.google.protobuf.Timestamp.Builder builderForVal * operation completed despite cancellation. On successful cancellation, * the operation is not deleted; instead, it becomes an operation with * an [Operation.error][google.longrunning.Operation.error] value with a - * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. *
    * * .google.protobuf.Timestamp cancel_time = 5; @@ -1784,7 +1792,8 @@ public Builder mergeCancelTime(com.google.protobuf.Timestamp value) { * operation completed despite cancellation. On successful cancellation, * the operation is not deleted; instead, it becomes an operation with * an [Operation.error][google.longrunning.Operation.error] value with a - * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. *
    * * .google.protobuf.Timestamp cancel_time = 5; @@ -1813,7 +1822,8 @@ public Builder clearCancelTime() { * operation completed despite cancellation. On successful cancellation, * the operation is not deleted; instead, it becomes an operation with * an [Operation.error][google.longrunning.Operation.error] value with a - * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. *
    * * .google.protobuf.Timestamp cancel_time = 5; @@ -1837,7 +1847,8 @@ public com.google.protobuf.Timestamp.Builder getCancelTimeBuilder() { * operation completed despite cancellation. On successful cancellation, * the operation is not deleted; instead, it becomes an operation with * an [Operation.error][google.longrunning.Operation.error] value with a - * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. *
    * * .google.protobuf.Timestamp cancel_time = 5; @@ -1865,7 +1876,8 @@ public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { * operation completed despite cancellation. On successful cancellation, * the operation is not deleted; instead, it becomes an operation with * an [Operation.error][google.longrunning.Operation.error] value with a - * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. *
    * * .google.protobuf.Timestamp cancel_time = 5; @@ -1899,10 +1911,10 @@ public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>` * where the <database> is the name of database being created and restored to. * The metadata type of the long-running operation is - * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. This long-running operation will be - * automatically created by the system after the RestoreDatabase long-running - * operation completes successfully. This operation will not be created if the - * restore was not successful. + * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. + * This long-running operation will be automatically created by the system + * after the RestoreDatabase long-running operation completes successfully. + * This operation will not be created if the restore was not successful. *
    * * string optimize_database_operation_name = 6; @@ -1931,10 +1943,10 @@ public java.lang.String getOptimizeDatabaseOperationName() { * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>` * where the <database> is the name of database being created and restored to. * The metadata type of the long-running operation is - * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. This long-running operation will be - * automatically created by the system after the RestoreDatabase long-running - * operation completes successfully. This operation will not be created if the - * restore was not successful. + * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. + * This long-running operation will be automatically created by the system + * after the RestoreDatabase long-running operation completes successfully. + * This operation will not be created if the restore was not successful. *
    * * string optimize_database_operation_name = 6; @@ -1963,10 +1975,10 @@ public com.google.protobuf.ByteString getOptimizeDatabaseOperationNameBytes() { * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>` * where the <database> is the name of database being created and restored to. * The metadata type of the long-running operation is - * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. This long-running operation will be - * automatically created by the system after the RestoreDatabase long-running - * operation completes successfully. This operation will not be created if the - * restore was not successful. + * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. + * This long-running operation will be automatically created by the system + * after the RestoreDatabase long-running operation completes successfully. + * This operation will not be created if the restore was not successful. *
    * * string optimize_database_operation_name = 6; @@ -1994,10 +2006,10 @@ public Builder setOptimizeDatabaseOperationName(java.lang.String value) { * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>` * where the <database> is the name of database being created and restored to. * The metadata type of the long-running operation is - * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. This long-running operation will be - * automatically created by the system after the RestoreDatabase long-running - * operation completes successfully. This operation will not be created if the - * restore was not successful. + * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. + * This long-running operation will be automatically created by the system + * after the RestoreDatabase long-running operation completes successfully. + * This operation will not be created if the restore was not successful. *
    * * string optimize_database_operation_name = 6; @@ -2021,10 +2033,10 @@ public Builder clearOptimizeDatabaseOperationName() { * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>` * where the <database> is the name of database being created and restored to. * The metadata type of the long-running operation is - * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. This long-running operation will be - * automatically created by the system after the RestoreDatabase long-running - * operation completes successfully. This operation will not be created if the - * restore was not successful. + * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. + * This long-running operation will be automatically created by the system + * after the RestoreDatabase long-running operation completes successfully. + * This operation will not be created if the restore was not successful. *
    * * string optimize_database_operation_name = 6; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseMetadataOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseMetadataOrBuilder.java index 1265196c52f..eea3dade4a0 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseMetadataOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseMetadataOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface RestoreDatabaseMetadataOrBuilder @@ -164,7 +164,8 @@ public interface RestoreDatabaseMetadataOrBuilder * operation completed despite cancellation. On successful cancellation, * the operation is not deleted; instead, it becomes an operation with * an [Operation.error][google.longrunning.Operation.error] value with a - * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. *
    * * .google.protobuf.Timestamp cancel_time = 5; @@ -186,7 +187,8 @@ public interface RestoreDatabaseMetadataOrBuilder * operation completed despite cancellation. On successful cancellation, * the operation is not deleted; instead, it becomes an operation with * an [Operation.error][google.longrunning.Operation.error] value with a - * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. *
    * * .google.protobuf.Timestamp cancel_time = 5; @@ -208,7 +210,8 @@ public interface RestoreDatabaseMetadataOrBuilder * operation completed despite cancellation. On successful cancellation, * the operation is not deleted; instead, it becomes an operation with * an [Operation.error][google.longrunning.Operation.error] value with a - * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + * [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + * `Code.CANCELLED`. *
    * * .google.protobuf.Timestamp cancel_time = 5; @@ -226,10 +229,10 @@ public interface RestoreDatabaseMetadataOrBuilder * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>` * where the <database> is the name of database being created and restored to. * The metadata type of the long-running operation is - * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. This long-running operation will be - * automatically created by the system after the RestoreDatabase long-running - * operation completes successfully. This operation will not be created if the - * restore was not successful. + * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. + * This long-running operation will be automatically created by the system + * after the RestoreDatabase long-running operation completes successfully. + * This operation will not be created if the restore was not successful. *
    * * string optimize_database_operation_name = 6; @@ -248,10 +251,10 @@ public interface RestoreDatabaseMetadataOrBuilder * `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>` * where the <database> is the name of database being created and restored to. * The metadata type of the long-running operation is - * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. This long-running operation will be - * automatically created by the system after the RestoreDatabase long-running - * operation completes successfully. This operation will not be created if the - * restore was not successful. + * [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. + * This long-running operation will be automatically created by the system + * after the RestoreDatabase long-running operation completes successfully. + * This operation will not be created if the restore was not successful. *
    * * string optimize_database_operation_name = 6; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseRequest.java index 17e2e870e38..9ecd7015fed 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -311,12 +311,12 @@ public com.google.protobuf.ByteString getBackupBytes() { * * *
    -   * Optional. An encryption configuration describing the encryption type and key
    -   * resources in Cloud KMS used to encrypt/decrypt the database to restore to.
    -   * If this field is not specified, the restored database will use
    -   * the same encryption configuration as the backup by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] =
    -   * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * Optional. An encryption configuration describing the encryption type and
    +   * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +   * to. If this field is not specified, the restored database will use the same
    +   * encryption configuration as the backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
        * 
    * * @@ -333,12 +333,12 @@ public boolean hasEncryptionConfig() { * * *
    -   * Optional. An encryption configuration describing the encryption type and key
    -   * resources in Cloud KMS used to encrypt/decrypt the database to restore to.
    -   * If this field is not specified, the restored database will use
    -   * the same encryption configuration as the backup by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] =
    -   * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * Optional. An encryption configuration describing the encryption type and
    +   * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +   * to. If this field is not specified, the restored database will use the same
    +   * encryption configuration as the backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
        * 
    * * @@ -358,12 +358,12 @@ public boolean hasEncryptionConfig() { * * *
    -   * Optional. An encryption configuration describing the encryption type and key
    -   * resources in Cloud KMS used to encrypt/decrypt the database to restore to.
    -   * If this field is not specified, the restored database will use
    -   * the same encryption configuration as the backup by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] =
    -   * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * Optional. An encryption configuration describing the encryption type and
    +   * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +   * to. If this field is not specified, the restored database will use the same
    +   * encryption configuration as the backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
        * 
    * * @@ -1263,12 +1263,12 @@ public Builder setBackupBytes(com.google.protobuf.ByteString value) { * * *
    -     * Optional. An encryption configuration describing the encryption type and key
    -     * resources in Cloud KMS used to encrypt/decrypt the database to restore to.
    -     * If this field is not specified, the restored database will use
    -     * the same encryption configuration as the backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1284,12 +1284,12 @@ public boolean hasEncryptionConfig() { * * *
    -     * Optional. An encryption configuration describing the encryption type and key
    -     * resources in Cloud KMS used to encrypt/decrypt the database to restore to.
    -     * If this field is not specified, the restored database will use
    -     * the same encryption configuration as the backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1313,12 +1313,12 @@ public boolean hasEncryptionConfig() { * * *
    -     * Optional. An encryption configuration describing the encryption type and key
    -     * resources in Cloud KMS used to encrypt/decrypt the database to restore to.
    -     * If this field is not specified, the restored database will use
    -     * the same encryption configuration as the backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1343,12 +1343,12 @@ public Builder setEncryptionConfig( * * *
    -     * Optional. An encryption configuration describing the encryption type and key
    -     * resources in Cloud KMS used to encrypt/decrypt the database to restore to.
    -     * If this field is not specified, the restored database will use
    -     * the same encryption configuration as the backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1371,12 +1371,12 @@ public Builder setEncryptionConfig( * * *
    -     * Optional. An encryption configuration describing the encryption type and key
    -     * resources in Cloud KMS used to encrypt/decrypt the database to restore to.
    -     * If this field is not specified, the restored database will use
    -     * the same encryption configuration as the backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1408,12 +1408,12 @@ public Builder mergeEncryptionConfig( * * *
    -     * Optional. An encryption configuration describing the encryption type and key
    -     * resources in Cloud KMS used to encrypt/decrypt the database to restore to.
    -     * If this field is not specified, the restored database will use
    -     * the same encryption configuration as the backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1434,12 +1434,12 @@ public Builder clearEncryptionConfig() { * * *
    -     * Optional. An encryption configuration describing the encryption type and key
    -     * resources in Cloud KMS used to encrypt/decrypt the database to restore to.
    -     * If this field is not specified, the restored database will use
    -     * the same encryption configuration as the backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1456,12 +1456,12 @@ public Builder clearEncryptionConfig() { * * *
    -     * Optional. An encryption configuration describing the encryption type and key
    -     * resources in Cloud KMS used to encrypt/decrypt the database to restore to.
    -     * If this field is not specified, the restored database will use
    -     * the same encryption configuration as the backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * @@ -1483,12 +1483,12 @@ public Builder clearEncryptionConfig() { * * *
    -     * Optional. An encryption configuration describing the encryption type and key
    -     * resources in Cloud KMS used to encrypt/decrypt the database to restore to.
    -     * If this field is not specified, the restored database will use
    -     * the same encryption configuration as the backup by default, namely
    -     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] =
    -     * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +     * Optional. An encryption configuration describing the encryption type and
    +     * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +     * to. If this field is not specified, the restored database will use the same
    +     * encryption configuration as the backup by default, namely
    +     * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +     * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
          * 
    * * diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseRequestOrBuilder.java index b80692a5d44..afebab0ff5b 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreDatabaseRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface RestoreDatabaseRequestOrBuilder @@ -136,12 +136,12 @@ public interface RestoreDatabaseRequestOrBuilder * * *
    -   * Optional. An encryption configuration describing the encryption type and key
    -   * resources in Cloud KMS used to encrypt/decrypt the database to restore to.
    -   * If this field is not specified, the restored database will use
    -   * the same encryption configuration as the backup by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] =
    -   * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * Optional. An encryption configuration describing the encryption type and
    +   * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +   * to. If this field is not specified, the restored database will use the same
    +   * encryption configuration as the backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
        * 
    * * @@ -155,12 +155,12 @@ public interface RestoreDatabaseRequestOrBuilder * * *
    -   * Optional. An encryption configuration describing the encryption type and key
    -   * resources in Cloud KMS used to encrypt/decrypt the database to restore to.
    -   * If this field is not specified, the restored database will use
    -   * the same encryption configuration as the backup by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] =
    -   * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * Optional. An encryption configuration describing the encryption type and
    +   * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +   * to. If this field is not specified, the restored database will use the same
    +   * encryption configuration as the backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
        * 
    * * @@ -174,12 +174,12 @@ public interface RestoreDatabaseRequestOrBuilder * * *
    -   * Optional. An encryption configuration describing the encryption type and key
    -   * resources in Cloud KMS used to encrypt/decrypt the database to restore to.
    -   * If this field is not specified, the restored database will use
    -   * the same encryption configuration as the backup by default, namely
    -   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] =
    -   * `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
    +   * Optional. An encryption configuration describing the encryption type and
    +   * key resources in Cloud KMS used to encrypt/decrypt the database to restore
    +   * to. If this field is not specified, the restored database will use the same
    +   * encryption configuration as the backup by default, namely
    +   * [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
    +   * = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
        * 
    * * diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreInfo.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreInfo.java index 29d428a863f..8714ff1403d 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreInfo.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreInfo.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreInfoOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreInfoOrBuilder.java index 1419ad13b7d..d7cb3218847 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreInfoOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreInfoOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface RestoreInfoOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreSourceType.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreSourceType.java index d8bd9aaaf03..69686954ee7 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreSourceType.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/RestoreSourceType.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SpannerDatabaseAdminProto.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SpannerDatabaseAdminProto.java index 858bfefa32d..9ebb5dfd07d 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SpannerDatabaseAdminProto.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SpannerDatabaseAdminProto.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public final class SpannerDatabaseAdminProto { @@ -144,260 +144,299 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "otobuf/empty.proto\032 google/protobuf/fiel" + "d_mask.proto\032\037google/protobuf/timestamp." + "proto\032-google/spanner/admin/database/v1/" - + "backup.proto\032-google/spanner/admin/datab" - + "ase/v1/common.proto\"\253\001\n\013RestoreInfo\022H\n\013s" - + "ource_type\030\001 \001(\01623.google.spanner.admin." - + "database.v1.RestoreSourceType\022C\n\013backup_" - + "info\030\002 \001(\0132,.google.spanner.admin.databa" - + "se.v1.BackupInfoH\000B\r\n\013source_info\"\312\006\n\010Da" - + "tabase\022\021\n\004name\030\001 \001(\tB\003\340A\002\022D\n\005state\030\002 \001(\016" - + "20.google.spanner.admin.database.v1.Data" - + "base.StateB\003\340A\003\0224\n\013create_time\030\003 \001(\0132\032.g" - + "oogle.protobuf.TimestampB\003\340A\003\022H\n\014restore" - + "_info\030\004 \001(\0132-.google.spanner.admin.datab" - + "ase.v1.RestoreInfoB\003\340A\003\022R\n\021encryption_co" - + "nfig\030\005 \001(\01322.google.spanner.admin.databa" - + "se.v1.EncryptionConfigB\003\340A\003\022N\n\017encryptio" - + "n_info\030\010 \003(\01320.google.spanner.admin.data" - + "base.v1.EncryptionInfoB\003\340A\003\022%\n\030version_r" - + "etention_period\030\006 \001(\tB\003\340A\003\022>\n\025earliest_v" - + "ersion_time\030\007 \001(\0132\032.google.protobuf.Time" - + "stampB\003\340A\003\022\033\n\016default_leader\030\t \001(\tB\003\340A\003\022" - + "P\n\020database_dialect\030\n \001(\01621.google.spann" - + "er.admin.database.v1.DatabaseDialectB\003\340A" - + "\003\022\036\n\026enable_drop_protection\030\013 \001(\010\022\030\n\013rec" - + "onciling\030\014 \001(\010B\003\340A\003\"M\n\005State\022\025\n\021STATE_UN" - + 
"SPECIFIED\020\000\022\014\n\010CREATING\020\001\022\t\n\005READY\020\002\022\024\n\020" - + "READY_OPTIMIZING\020\003:b\352A_\n\037spanner.googlea" - + "pis.com/Database\022\n\025earliest_version_time\030\007 \001(\0132\032.goog" + + "le.protobuf.TimestampB\003\340A\003\022\033\n\016default_le" + + "ader\030\t \001(\tB\003\340A\003\022P\n\020database_dialect\030\n \001(" + + "\01621.google.spanner.admin.database.v1.Dat" + + "abaseDialectB\003\340A\003\022\036\n\026enable_drop_protect" + + "ion\030\013 \001(\010\022\030\n\013reconciling\030\014 \001(\010B\003\340A\003\"M\n\005S" + + "tate\022\025\n\021STATE_UNSPECIFIED\020\000\022\014\n\010CREATING\020" + + "\001\022\t\n\005READY\020\002\022\024\n\020READY_OPTIMIZING\020\003:b\352A_\n" + + "\037spanner.googleapis.com/Database\022\332A\006parent\202\323\344\223\002/\022-/v1/{parent=projects" - + "/*/instances/*}/databases\022\244\002\n\016CreateData" - + "base\0227.google.spanner.admin.database.v1." - + "CreateDatabaseRequest\032\035.google.longrunni" - + "ng.Operation\"\271\001\312Ad\n)google.spanner.admin" - + ".database.v1.Database\0227google.spanner.ad" - + "min.database.v1.CreateDatabaseMetadata\332A" - + "\027parent,create_statement\202\323\344\223\0022\"-/v1/{par" - + "ent=projects/*/instances/*}/databases:\001*" - + "\022\255\001\n\013GetDatabase\0224.google.spanner.admin." - + "database.v1.GetDatabaseRequest\032*.google." 
- + "spanner.admin.database.v1.Database\"<\332A\004n" - + "ame\202\323\344\223\002/\022-/v1/{name=projects/*/instance" - + "s/*/databases/*}\022\357\001\n\016UpdateDatabase\0227.go" - + "ogle.spanner.admin.database.v1.UpdateDat" - + "abaseRequest\032\035.google.longrunning.Operat" - + "ion\"\204\001\312A\"\n\010Database\022\026UpdateDatabaseMetad" - + "ata\332A\024database,update_mask\202\323\344\223\002B26/v1/{d" - + "atabase.name=projects/*/instances/*/data" - + "bases/*}:\010database\022\235\002\n\021UpdateDatabaseDdl" - + "\022:.google.spanner.admin.database.v1.Upda" - + "teDatabaseDdlRequest\032\035.google.longrunnin" - + "g.Operation\"\254\001\312AS\n\025google.protobuf.Empty" - + "\022:google.spanner.admin.database.v1.Updat" - + "eDatabaseDdlMetadata\332A\023database,statemen" - + "ts\202\323\344\223\002:25/v1/{database=projects/*/insta" - + "nces/*/databases/*}/ddl:\001*\022\243\001\n\014DropDatab" - + "ase\0225.google.spanner.admin.database.v1.D" - + "ropDatabaseRequest\032\026.google.protobuf.Emp" - + "ty\"D\332A\010database\202\323\344\223\0023*1/v1/{database=pro" - + "jects/*/instances/*/databases/*}\022\315\001\n\016Get" - + "DatabaseDdl\0227.google.spanner.admin.datab" - + "ase.v1.GetDatabaseDdlRequest\0328.google.sp" - + "anner.admin.database.v1.GetDatabaseDdlRe" - + "sponse\"H\332A\010database\202\323\344\223\0027\0225/v1/{database" - + "=projects/*/instances/*/databases/*}/ddl" - + "\022\353\001\n\014SetIamPolicy\022\".google.iam.v1.SetIam" - + "PolicyRequest\032\025.google.iam.v1.Policy\"\237\001\332" - + "A\017resource,policy\202\323\344\223\002\206\001\">/v1/{resource=" - + "projects/*/instances/*/databases/*}:setI" + + "\030\n\013database_id\030\002 \001(\tB\003\340A\002\0224\n\006backup\030\003 \001(" + + "\tB\"\372A\037\n\035spanner.googleapis.com/BackupH\000\022" + + "a\n\021encryption_config\030\004 \001(\0132A.google.span" + + "ner.admin.database.v1.RestoreDatabaseEnc" + + 
"ryptionConfigB\003\340A\001B\010\n\006source\"\265\003\n\037Restore" + + "DatabaseEncryptionConfig\022n\n\017encryption_t" + + "ype\030\001 \001(\0162P.google.spanner.admin.databas" + + "e.v1.RestoreDatabaseEncryptionConfig.Enc" + + "ryptionTypeB\003\340A\002\022?\n\014kms_key_name\030\002 \001(\tB)" + + "\340A\001\372A#\n!cloudkms.googleapis.com/CryptoKe" + + "y\022@\n\rkms_key_names\030\003 \003(\tB)\340A\001\372A#\n!cloudk" + + "ms.googleapis.com/CryptoKey\"\236\001\n\016Encrypti" + + "onType\022\037\n\033ENCRYPTION_TYPE_UNSPECIFIED\020\000\022" + + "+\n\'USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTI" + + "ON\020\001\022\035\n\031GOOGLE_DEFAULT_ENCRYPTION\020\002\022\037\n\033C" + + "USTOMER_MANAGED_ENCRYPTION\020\003\"\215\003\n\027Restore" + + "DatabaseMetadata\0222\n\004name\030\001 \001(\tB$\372A!\n\037spa" + + "nner.googleapis.com/Database\022H\n\013source_t" + + "ype\030\002 \001(\01623.google.spanner.admin.databas" + + "e.v1.RestoreSourceType\022C\n\013backup_info\030\003 " + + "\001(\0132,.google.spanner.admin.database.v1.B" + + "ackupInfoH\000\022E\n\010progress\030\004 \001(\01323.google.s" + + "panner.admin.database.v1.OperationProgre" + + "ss\022/\n\013cancel_time\030\005 \001(\0132\032.google.protobu" + + "f.Timestamp\022(\n optimize_database_operati" + + "on_name\030\006 \001(\tB\r\n\013source_info\"\235\001\n Optimiz" + + "eRestoredDatabaseMetadata\0222\n\004name\030\001 \001(\tB" + + "$\372A!\n\037spanner.googleapis.com/Database\022E\n" + + "\010progress\030\002 \001(\01323.google.spanner.admin.d" + + "atabase.v1.OperationProgress\"\236\001\n\014Databas" + + "eRole\022\021\n\004name\030\001 \001(\tB\003\340A\002:{\352Ax\n#spanner.g" + + "oogleapis.com/DatabaseRole\022Qprojects/{pr" + + "oject}/instances/{instance}/databases/{d" + + "atabase}/databaseRoles/{role}\"z\n\030ListDat" + + "abaseRolesRequest\0227\n\006parent\030\001 \001(\tB\'\340A\002\372A" + + "!\n\037spanner.googleapis.com/Database\022\021\n\tpa" + + "ge_size\030\002 
\001(\005\022\022\n\npage_token\030\003 \001(\t\"|\n\031Lis" + + "tDatabaseRolesResponse\022F\n\016database_roles" + + "\030\001 \003(\0132..google.spanner.admin.database.v" + + "1.DatabaseRole\022\027\n\017next_page_token\030\002 \001(\t*" + + "5\n\021RestoreSourceType\022\024\n\020TYPE_UNSPECIFIED" + + "\020\000\022\n\n\006BACKUP\020\0012\2301\n\rDatabaseAdmin\022\300\001\n\rLis" + + "tDatabases\0226.google.spanner.admin.databa" + + "se.v1.ListDatabasesRequest\0327.google.span" + + "ner.admin.database.v1.ListDatabasesRespo" + + "nse\">\332A\006parent\202\323\344\223\002/\022-/v1/{parent=projec" + + "ts/*/instances/*}/databases\022\244\002\n\016CreateDa" + + "tabase\0227.google.spanner.admin.database.v" + + "1.CreateDatabaseRequest\032\035.google.longrun" + + "ning.Operation\"\271\001\312Ad\n)google.spanner.adm" + + "in.database.v1.Database\0227google.spanner." + + "admin.database.v1.CreateDatabaseMetadata" + + "\332A\027parent,create_statement\202\323\344\223\0022\"-/v1/{p" + + "arent=projects/*/instances/*}/databases:" + + "\001*\022\255\001\n\013GetDatabase\0224.google.spanner.admi" + + "n.database.v1.GetDatabaseRequest\032*.googl" + + "e.spanner.admin.database.v1.Database\"<\332A" + + "\004name\202\323\344\223\002/\022-/v1/{name=projects/*/instan" + + "ces/*/databases/*}\022\357\001\n\016UpdateDatabase\0227." 
+ + "google.spanner.admin.database.v1.UpdateD" + + "atabaseRequest\032\035.google.longrunning.Oper" + + "ation\"\204\001\312A\"\n\010Database\022\026UpdateDatabaseMet" + + "adata\332A\024database,update_mask\202\323\344\223\002B26/v1/" + + "{database.name=projects/*/instances/*/da" + + "tabases/*}:\010database\022\235\002\n\021UpdateDatabaseD" + + "dl\022:.google.spanner.admin.database.v1.Up" + + "dateDatabaseDdlRequest\032\035.google.longrunn" + + "ing.Operation\"\254\001\312AS\n\025google.protobuf.Emp" + + "ty\022:google.spanner.admin.database.v1.Upd" + + "ateDatabaseDdlMetadata\332A\023database,statem" + + "ents\202\323\344\223\002:25/v1/{database=projects/*/ins" + + "tances/*/databases/*}/ddl:\001*\022\243\001\n\014DropDat" + + "abase\0225.google.spanner.admin.database.v1" + + ".DropDatabaseRequest\032\026.google.protobuf.E" + + "mpty\"D\332A\010database\202\323\344\223\0023*1/v1/{database=p" + + "rojects/*/instances/*/databases/*}\022\315\001\n\016G" + + "etDatabaseDdl\0227.google.spanner.admin.dat" + + "abase.v1.GetDatabaseDdlRequest\0328.google." 
+ + "spanner.admin.database.v1.GetDatabaseDdl" + + "Response\"H\332A\010database\202\323\344\223\0027\0225/v1/{databa" + + "se=projects/*/instances/*/databases/*}/d" + + "dl\022\302\002\n\014SetIamPolicy\022\".google.iam.v1.SetI" + + "amPolicyRequest\032\025.google.iam.v1.Policy\"\366" + + "\001\332A\017resource,policy\202\323\344\223\002\335\001\">/v1/{resourc" + + "e=projects/*/instances/*/databases/*}:se" + + "tIamPolicy:\001*ZA\"/v1/{resource=" + + "projects/*/instances/*/databases/*}:getI" + "amPolicy:\001*ZA\"/v1/{resource=projects/" - + "*/instances/*/databases/*}:getIamPolicy:" - + "\001*ZA\".google.spanner.a" - + "dmin.database.v1.ListBackupOperationsRes" - + "ponse\"E\332A\006parent\202\323\344\223\0026\0224/v1/{parent=proj" - + "ects/*/instances/*}/backupOperations\022\334\001\n" - + "\021ListDatabaseRoles\022:.google.spanner.admi" - + "n.database.v1.ListDatabaseRolesRequest\032;" + + "instances/*/backups/*}:getIamPolicy:\001*ZU" + + "\"P/v1/{resource=projects/*/instances/*/d" + + "atabases/*/backupSchedules/*}:getIamPoli" + + "cy:\001*\022\324\003\n\022TestIamPermissions\022(.google.ia" + + "m.v1.TestIamPermissionsRequest\032).google." 
+ + "iam.v1.TestIamPermissionsResponse\"\350\002\332A\024r" + + "esource,permissions\202\323\344\223\002\312\002\"D/v1/{resourc" + + "e=projects/*/instances/*/databases/*}:te" + + "stIamPermissions:\001*ZG\"B/v1/{resource=pro" + + "jects/*/instances/*/backups/*}:testIamPe" + + "rmissions:\001*Z[\"V/v1/{resource=projects/*" + + "/instances/*/databases/*/backupSchedules" + + "/*}:testIamPermissions:\001*ZY\"T/v1/{resour" + + "ce=projects/*/instances/*/databases/*/da" + + "tabaseRoles/*}:testIamPermissions:\001*\022\237\002\n" + + "\014CreateBackup\0225.google.spanner.admin.dat" + + "abase.v1.CreateBackupRequest\032\035.google.lo" + + "ngrunning.Operation\"\270\001\312A`\n\'google.spanne" + + "r.admin.database.v1.Backup\0225google.spann" + + "er.admin.database.v1.CreateBackupMetadat" + + "a\332A\027parent,backup,backup_id\202\323\344\223\0025\"+/v1/{" + + "parent=projects/*/instances/*}/backups:\006" + + "backup\022\254\002\n\nCopyBackup\0223.google.spanner.a" + + "dmin.database.v1.CopyBackupRequest\032\035.goo" + + "gle.longrunning.Operation\"\311\001\312A^\n\'google." 
+ + "spanner.admin.database.v1.Backup\0223google" + + ".spanner.admin.database.v1.CopyBackupMet" + + "adata\332A*parent,backup_id,source_backup,e" + + "xpire_time\202\323\344\223\0025\"0/v1/{parent=projects/*" + + "/instances/*}/backups:copy:\001*\022\245\001\n\tGetBac" + + "kup\0222.google.spanner.admin.database.v1.G" + + "etBackupRequest\032(.google.spanner.admin.d" + + "atabase.v1.Backup\":\332A\004name\202\323\344\223\002-\022+/v1/{n" + + "ame=projects/*/instances/*/backups/*}\022\310\001" + + "\n\014UpdateBackup\0225.google.spanner.admin.da" + + "tabase.v1.UpdateBackupRequest\032(.google.s" + + "panner.admin.database.v1.Backup\"W\332A\022back" + + "up,update_mask\202\323\344\223\002<22/v1/{backup.name=p" + + "rojects/*/instances/*/backups/*}:\006backup" + + "\022\231\001\n\014DeleteBackup\0225.google.spanner.admin" + + ".database.v1.DeleteBackupRequest\032\026.googl" + + "e.protobuf.Empty\":\332A\004name\202\323\344\223\002-*+/v1/{na" + + "me=projects/*/instances/*/backups/*}\022\270\001\n" + + "\013ListBackups\0224.google.spanner.admin.data" + + "base.v1.ListBackupsRequest\0325.google.span" + + "ner.admin.database.v1.ListBackupsRespons" + + "e\"<\332A\006parent\202\323\344\223\002-\022+/v1/{parent=projects" + + "/*/instances/*}/backups\022\261\002\n\017RestoreDatab" + + "ase\0228.google.spanner.admin.database.v1.R" + + "estoreDatabaseRequest\032\035.google.longrunni" + + "ng.Operation\"\304\001\312Ae\n)google.spanner.admin" + + ".database.v1.Database\0228google.spanner.ad" + + "min.database.v1.RestoreDatabaseMetadata\332" + + "A\031parent,database_id,backup\202\323\344\223\002:\"5/v1/{" + + "parent=projects/*/instances/*}/databases" + + ":restore:\001*\022\344\001\n\026ListDatabaseOperations\022?" 
+ ".google.spanner.admin.database.v1.ListDa" - + "tabaseRolesResponse\"N\332A\006parent\202\323\344\223\002?\022=/v" - + "1/{parent=projects/*/instances/*/databas" - + "es/*}/databaseRoles\032x\312A\026spanner.googleap" - + "is.com\322A\\https://www.googleapis.com/auth" - + "/cloud-platform,https://www.googleapis.c" - + "om/auth/spanner.adminB\330\002\n$com.google.spa" - + "nner.admin.database.v1B\031SpannerDatabaseA" - + "dminProtoP\001ZFcloud.google.com/go/spanner" - + "/admin/database/apiv1/databasepb;databas" - + "epb\252\002&Google.Cloud.Spanner.Admin.Databas" - + "e.V1\312\002&Google\\Cloud\\Spanner\\Admin\\Databa" - + "se\\V1\352\002+Google::Cloud::Spanner::Admin::D" - + "atabase::V1\352AJ\n\037spanner.googleapis.com/I" - + "nstance\022\'projects/{project}/instances/{i" - + "nstance}b\006proto3" + + "tabaseOperationsRequest\032@.google.spanner" + + ".admin.database.v1.ListDatabaseOperation" + + "sResponse\"G\332A\006parent\202\323\344\223\0028\0226/v1/{parent=" + + "projects/*/instances/*}/databaseOperatio" + + "ns\022\334\001\n\024ListBackupOperations\022=.google.spa" + + "nner.admin.database.v1.ListBackupOperati" + + "onsRequest\032>.google.spanner.admin.databa" + + "se.v1.ListBackupOperationsResponse\"E\332A\006p" + + "arent\202\323\344\223\0026\0224/v1/{parent=projects/*/inst" + + "ances/*}/backupOperations\022\334\001\n\021ListDataba" + + "seRoles\022:.google.spanner.admin.database." + + "v1.ListDatabaseRolesRequest\032;.google.spa" + + "nner.admin.database.v1.ListDatabaseRoles" + + "Response\"N\332A\006parent\202\323\344\223\002?\022=/v1/{parent=p" + + "rojects/*/instances/*/databases/*}/datab" + + "aseRoles\022\216\002\n\024CreateBackupSchedule\022=.goog" + + "le.spanner.admin.database.v1.CreateBacku" + + "pScheduleRequest\0320.google.spanner.admin." 
+ + "database.v1.BackupSchedule\"\204\001\332A)parent,b" + + "ackup_schedule,backup_schedule_id\202\323\344\223\002R\"" + + "?/v1/{parent=projects/*/instances/*/data" + + "bases/*}/backupSchedules:\017backup_schedul" + + "e\022\321\001\n\021GetBackupSchedule\022:.google.spanner" + + ".admin.database.v1.GetBackupScheduleRequ" + + "est\0320.google.spanner.admin.database.v1.B" + + "ackupSchedule\"N\332A\004name\202\323\344\223\002A\022?/v1/{name=" + + "projects/*/instances/*/databases/*/backu" + + "pSchedules/*}\022\220\002\n\024UpdateBackupSchedule\022=" + + ".google.spanner.admin.database.v1.Update" + + "BackupScheduleRequest\0320.google.spanner.a" + + "dmin.database.v1.BackupSchedule\"\206\001\332A\033bac" + + "kup_schedule,update_mask\202\323\344\223\002b2O/v1/{bac" + + "kup_schedule.name=projects/*/instances/*" + + "/databases/*/backupSchedules/*}:\017backup_" + + "schedule\022\275\001\n\024DeleteBackupSchedule\022=.goog" + + "le.spanner.admin.database.v1.DeleteBacku" + + "pScheduleRequest\032\026.google.protobuf.Empty" + + "\"N\332A\004name\202\323\344\223\002A*?/v1/{name=projects/*/in" + + "stances/*/databases/*/backupSchedules/*}" + + "\022\344\001\n\023ListBackupSchedules\022<.google.spanne" + + "r.admin.database.v1.ListBackupSchedulesR" + + "equest\032=.google.spanner.admin.database.v" + + "1.ListBackupSchedulesResponse\"P\332A\006parent" + + "\202\323\344\223\002A\022?/v1/{parent=projects/*/instances" + + "/*/databases/*}/backupSchedules\032x\312A\026span" + + "ner.googleapis.com\322A\\https://www.googlea" + + "pis.com/auth/cloud-platform,https://www." 
+ + "googleapis.com/auth/spanner.adminB\330\002\n$co" + + "m.google.spanner.admin.database.v1B\031Span" + + "nerDatabaseAdminProtoP\001ZFcloud.google.co" + + "m/go/spanner/admin/database/apiv1/databa" + + "sepb;databasepb\252\002&Google.Cloud.Spanner.A" + + "dmin.Database.V1\312\002&Google\\Cloud\\Spanner\\" + + "Admin\\Database\\V1\352\002+Google::Cloud::Spann" + + "er::Admin::Database::V1\352AJ\n\037spanner.goog" + + "leapis.com/Instance\022\'projects/{project}/" + + "instances/{instance}b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -414,6 +453,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.protobuf.FieldMaskProto.getDescriptor(), com.google.protobuf.TimestampProto.getDescriptor(), com.google.spanner.admin.database.v1.BackupProto.getDescriptor(), + com.google.spanner.admin.database.v1.BackupScheduleProto.getDescriptor(), com.google.spanner.admin.database.v1.CommonProto.getDescriptor(), }); internal_static_google_spanner_admin_database_v1_RestoreInfo_descriptor = @@ -582,7 +622,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_database_v1_RestoreDatabaseEncryptionConfig_descriptor, new java.lang.String[] { - "EncryptionType", "KmsKeyName", + "EncryptionType", "KmsKeyName", "KmsKeyNames", }); internal_static_google_spanner_admin_database_v1_RestoreDatabaseMetadata_descriptor = getDescriptor().getMessageTypes().get(19); @@ -654,6 +694,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.protobuf.FieldMaskProto.getDescriptor(); com.google.protobuf.TimestampProto.getDescriptor(); com.google.spanner.admin.database.v1.BackupProto.getDescriptor(); + com.google.spanner.admin.database.v1.BackupScheduleProto.getDescriptor(); com.google.spanner.admin.database.v1.CommonProto.getDescriptor(); 
} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupRequest.java index a01f91fef90..a11347980ff 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupRequest.java @@ -16,14 +16,15 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** * * *
    - * The request for [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
    + * The request for
    + * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
      * 
    * * Protobuf type {@code google.spanner.admin.database.v1.UpdateBackupRequest} @@ -371,7 +372,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * The request for [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
    +   * The request for
    +   * [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
        * 
    * * Protobuf type {@code google.spanner.admin.database.v1.UpdateBackupRequest} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupRequestOrBuilder.java index c0039b9184c..49a4d96eecb 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/backup.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface UpdateBackupRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupScheduleRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupScheduleRequest.java new file mode 100644 index 00000000000..5eb515ab0fe --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupScheduleRequest.java @@ -0,0 +1,1107 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +/** + * + * + *
    + * The request for
    + * [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.UpdateBackupScheduleRequest} + */ +public final class UpdateBackupScheduleRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.UpdateBackupScheduleRequest) + UpdateBackupScheduleRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use UpdateBackupScheduleRequest.newBuilder() to construct. + private UpdateBackupScheduleRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private UpdateBackupScheduleRequest() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new UpdateBackupScheduleRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.Builder.class); + } + + private int bitField0_; + public static final int BACKUP_SCHEDULE_FIELD_NUMBER = 1; + private com.google.spanner.admin.database.v1.BackupSchedule backupSchedule_; + /** + * + * + *
    +   * Required. The backup schedule to update. `backup_schedule.name`, and the
    +   * fields to be updated as specified by `update_mask` are required. Other
    +   * fields are ignored.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backupSchedule field is set. + */ + @java.lang.Override + public boolean hasBackupSchedule() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
    +   * Required. The backup schedule to update. `backup_schedule.name`, and the
    +   * fields to be updated as specified by `update_mask` are required. Other
    +   * fields are ignored.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backupSchedule. + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedule() { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } + /** + * + * + *
    +   * Required. The backup schedule to update. `backup_schedule.name`, and the
    +   * fields to be updated as specified by `update_mask` are required. Other
    +   * fields are ignored.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.database.v1.BackupScheduleOrBuilder getBackupScheduleOrBuilder() { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } + + public static final int UPDATE_MASK_FIELD_NUMBER = 2; + private com.google.protobuf.FieldMask updateMask_; + /** + * + * + *
    +   * Required. A mask specifying which fields in the BackupSchedule resource
    +   * should be updated. This mask is relative to the BackupSchedule resource,
    +   * not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased
    +   * accidentally.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + @java.lang.Override + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + *
    +   * Required. A mask specifying which fields in the BackupSchedule resource
    +   * should be updated. This mask is relative to the BackupSchedule resource,
    +   * not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased
    +   * accidentally.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + @java.lang.Override + public com.google.protobuf.FieldMask getUpdateMask() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + /** + * + * + *
    +   * Required. A mask specifying which fields in the BackupSchedule resource
    +   * should be updated. This mask is relative to the BackupSchedule resource,
    +   * not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased
    +   * accidentally.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + return updateMask_ == null ? com.google.protobuf.FieldMask.getDefaultInstance() : updateMask_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getBackupSchedule()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getUpdateMask()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getBackupSchedule()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getUpdateMask()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest other = + (com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest) obj; + + if (hasBackupSchedule() != other.hasBackupSchedule()) return false; + if (hasBackupSchedule()) { + if (!getBackupSchedule().equals(other.getBackupSchedule())) return false; + 
} + if (hasUpdateMask() != other.hasUpdateMask()) return false; + if (hasUpdateMask()) { + if (!getUpdateMask().equals(other.getUpdateMask())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasBackupSchedule()) { + hash = (37 * hash) + BACKUP_SCHEDULE_FIELD_NUMBER; + hash = (53 * hash) + getBackupSchedule().hashCode(); + } + if (hasUpdateMask()) { + hash = (37 * hash) + UPDATE_MASK_FIELD_NUMBER; + hash = (53 * hash) + getUpdateMask().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * The request for
    +   * [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.database.v1.UpdateBackupScheduleRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.UpdateBackupScheduleRequest) + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.class, + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getBackupScheduleFieldBuilder(); + getUpdateMaskFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + backupSchedule_ = null; + if (backupScheduleBuilder_ != null) { + backupScheduleBuilder_.dispose(); + backupScheduleBuilder_ = null; + } + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + return this; + } 
+ + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.BackupScheduleProto + .internal_static_google_spanner_admin_database_v1_UpdateBackupScheduleRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest build() { + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest buildPartial() { + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest result = + new com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.backupSchedule_ = + backupScheduleBuilder_ == null ? backupSchedule_ : backupScheduleBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.updateMask_ = updateMaskBuilder_ == null ? 
updateMask_ : updateMaskBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest) { + return mergeFrom((com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest other) { + if (other + == com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest.getDefaultInstance()) + return this; + if (other.hasBackupSchedule()) { + mergeBackupSchedule(other.getBackupSchedule()); + } + if (other.hasUpdateMask()) { + mergeUpdateMask(other.getUpdateMask()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + 
public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage(getBackupScheduleFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getUpdateMaskFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.database.v1.BackupSchedule backupSchedule_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder> + backupScheduleBuilder_; + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backupSchedule field is set. + */ + public boolean hasBackupSchedule() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backupSchedule. + */ + public com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedule() { + if (backupScheduleBuilder_ == null) { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } else { + return backupScheduleBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setBackupSchedule(com.google.spanner.admin.database.v1.BackupSchedule value) { + if (backupScheduleBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + backupSchedule_ = value; + } else { + backupScheduleBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setBackupSchedule( + com.google.spanner.admin.database.v1.BackupSchedule.Builder builderForValue) { + if (backupScheduleBuilder_ == null) { + backupSchedule_ = builderForValue.build(); + } else { + backupScheduleBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeBackupSchedule(com.google.spanner.admin.database.v1.BackupSchedule value) { + if (backupScheduleBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && backupSchedule_ != null + && backupSchedule_ + != com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance()) { + getBackupScheduleBuilder().mergeFrom(value); + } else { + backupSchedule_ = value; + } + } else { + backupScheduleBuilder_.mergeFrom(value); + } + if (backupSchedule_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearBackupSchedule() { + bitField0_ = (bitField0_ & ~0x00000001); + backupSchedule_ = null; + if (backupScheduleBuilder_ != null) { + backupScheduleBuilder_.dispose(); + backupScheduleBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.BackupSchedule.Builder getBackupScheduleBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getBackupScheduleFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.database.v1.BackupScheduleOrBuilder + getBackupScheduleOrBuilder() { + if (backupScheduleBuilder_ != null) { + return backupScheduleBuilder_.getMessageOrBuilder(); + } else { + return backupSchedule_ == null + ? com.google.spanner.admin.database.v1.BackupSchedule.getDefaultInstance() + : backupSchedule_; + } + } + /** + * + * + *
    +     * Required. The backup schedule to update. `backup_schedule.name`, and the
    +     * fields to be updated as specified by `update_mask` are required. Other
    +     * fields are ignored.
    +     * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder> + getBackupScheduleFieldBuilder() { + if (backupScheduleBuilder_ == null) { + backupScheduleBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.database.v1.BackupSchedule, + com.google.spanner.admin.database.v1.BackupSchedule.Builder, + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder>( + getBackupSchedule(), getParentForChildren(), isClean()); + backupSchedule_ = null; + } + return backupScheduleBuilder_; + } + + private com.google.protobuf.FieldMask updateMask_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + updateMaskBuilder_; + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + public boolean hasUpdateMask() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + public com.google.protobuf.FieldMask getUpdateMask() { + if (updateMaskBuilder_ == null) { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } else { + return updateMaskBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + updateMask_ = value; + } else { + updateMaskBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForValue) { + if (updateMaskBuilder_ == null) { + updateMask_ = builderForValue.build(); + } else { + updateMaskBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { + if (updateMaskBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && updateMask_ != null + && updateMask_ != com.google.protobuf.FieldMask.getDefaultInstance()) { + getUpdateMaskBuilder().mergeFrom(value); + } else { + updateMask_ = value; + } + } else { + updateMaskBuilder_.mergeFrom(value); + } + if (updateMask_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearUpdateMask() { + bitField0_ = (bitField0_ & ~0x00000002); + updateMask_ = null; + if (updateMaskBuilder_ != null) { + updateMaskBuilder_.dispose(); + updateMaskBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getUpdateMaskFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { + if (updateMaskBuilder_ != null) { + return updateMaskBuilder_.getMessageOrBuilder(); + } else { + return updateMask_ == null + ? com.google.protobuf.FieldMask.getDefaultInstance() + : updateMask_; + } + } + /** + * + * + *
    +     * Required. A mask specifying which fields in the BackupSchedule resource
    +     * should be updated. This mask is relative to the BackupSchedule resource,
    +     * not to the request message. The field mask must always be
    +     * specified; this prevents any future fields from being erased
    +     * accidentally.
    +     * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder> + getUpdateMaskFieldBuilder() { + if (updateMaskBuilder_ == null) { + updateMaskBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.FieldMask, + com.google.protobuf.FieldMask.Builder, + com.google.protobuf.FieldMaskOrBuilder>( + getUpdateMask(), getParentForChildren(), isClean()); + updateMask_ = null; + } + return updateMaskBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.UpdateBackupScheduleRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.UpdateBackupScheduleRequest) + private static final com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest(); + } + + public static com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public UpdateBackupScheduleRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, 
extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupScheduleRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupScheduleRequestOrBuilder.java new file mode 100644 index 00000000000..04f89b1311e --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateBackupScheduleRequestOrBuilder.java @@ -0,0 +1,123 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/backup_schedule.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.database.v1; + +public interface UpdateBackupScheduleRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.UpdateBackupScheduleRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The backup schedule to update. `backup_schedule.name`, and the
    +   * fields to be updated as specified by `update_mask` are required. Other
    +   * fields are ignored.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the backupSchedule field is set. + */ + boolean hasBackupSchedule(); + /** + * + * + *
    +   * Required. The backup schedule to update. `backup_schedule.name`, and the
    +   * fields to be updated as specified by `update_mask` are required. Other
    +   * fields are ignored.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The backupSchedule. + */ + com.google.spanner.admin.database.v1.BackupSchedule getBackupSchedule(); + /** + * + * + *
    +   * Required. The backup schedule to update. `backup_schedule.name`, and the
    +   * fields to be updated as specified by `update_mask` are required. Other
    +   * fields are ignored.
    +   * 
    + * + * + * .google.spanner.admin.database.v1.BackupSchedule backup_schedule = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.database.v1.BackupScheduleOrBuilder getBackupScheduleOrBuilder(); + + /** + * + * + *
    +   * Required. A mask specifying which fields in the BackupSchedule resource
    +   * should be updated. This mask is relative to the BackupSchedule resource,
    +   * not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased
    +   * accidentally.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the updateMask field is set. + */ + boolean hasUpdateMask(); + /** + * + * + *
    +   * Required. A mask specifying which fields in the BackupSchedule resource
    +   * should be updated. This mask is relative to the BackupSchedule resource,
    +   * not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased
    +   * accidentally.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The updateMask. + */ + com.google.protobuf.FieldMask getUpdateMask(); + /** + * + * + *
    +   * Required. A mask specifying which fields in the BackupSchedule resource
    +   * should be updated. This mask is relative to the BackupSchedule resource,
    +   * not to the request message. The field mask must always be
    +   * specified; this prevents any future fields from being erased
    +   * accidentally.
    +   * 
    + * + * .google.protobuf.FieldMask update_mask = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder(); +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlMetadata.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlMetadata.java index 7382889bcca..852bfc21117 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlMetadata.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlMetadata.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlMetadataOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlMetadataOrBuilder.java index edd7cdd12f5..3d29d7306a8 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlMetadataOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlMetadataOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface UpdateDatabaseDdlMetadataOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlRequest.java index 63da4ea5b60..94ea338d85f 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** @@ -37,8 +37,8 @@ * Each batch of statements is assigned a name which can be used with * the [Operations][google.longrunning.Operations] API to monitor * progress. See the - * [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] field for more - * details. + * [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] + * field for more details. *
    * * Protobuf type {@code google.spanner.admin.database.v1.UpdateDatabaseDdlRequest} @@ -215,18 +215,20 @@ public com.google.protobuf.ByteString getStatementsBytes(int index) { * * Specifying an explicit operation ID simplifies determining * whether the statements were executed in the event that the - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, - * or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and - * `operation_id` fields can be combined to form the + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * call is replayed, or the return value is otherwise lost: the + * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] + * and `operation_id` fields can be combined to form the * [name][google.longrunning.Operation.name] of the resulting - * [longrunning.Operation][google.longrunning.Operation]: `<database>/operations/<operation_id>`. + * [longrunning.Operation][google.longrunning.Operation]: + * `<database>/operations/<operation_id>`. * * `operation_id` should be unique within the database, and must be * a valid identifier: `[a-z][a-z0-9_]*`. Note that * automatically-generated operation IDs always begin with an * underscore. If the named operation already exists, - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns - * `ALREADY_EXISTS`. + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * returns `ALREADY_EXISTS`. *
    * * string operation_id = 3; @@ -256,18 +258,20 @@ public java.lang.String getOperationId() { * * Specifying an explicit operation ID simplifies determining * whether the statements were executed in the event that the - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, - * or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and - * `operation_id` fields can be combined to form the + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * call is replayed, or the return value is otherwise lost: the + * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] + * and `operation_id` fields can be combined to form the * [name][google.longrunning.Operation.name] of the resulting - * [longrunning.Operation][google.longrunning.Operation]: `<database>/operations/<operation_id>`. + * [longrunning.Operation][google.longrunning.Operation]: + * `<database>/operations/<operation_id>`. * * `operation_id` should be unique within the database, and must be * a valid identifier: `[a-z][a-z0-9_]*`. Note that * automatically-generated operation IDs always begin with an * underscore. If the named operation already exists, - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns - * `ALREADY_EXISTS`. + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * returns `ALREADY_EXISTS`. *
    * * string operation_id = 3; @@ -530,8 +534,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * Each batch of statements is assigned a name which can be used with * the [Operations][google.longrunning.Operations] API to monitor * progress. See the - * [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] field for more - * details. + * [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] + * field for more details. *
    * * Protobuf type {@code google.spanner.admin.database.v1.UpdateDatabaseDdlRequest} @@ -1065,18 +1069,20 @@ public Builder addStatementsBytes(com.google.protobuf.ByteString value) { * * Specifying an explicit operation ID simplifies determining * whether the statements were executed in the event that the - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, - * or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and - * `operation_id` fields can be combined to form the + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * call is replayed, or the return value is otherwise lost: the + * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] + * and `operation_id` fields can be combined to form the * [name][google.longrunning.Operation.name] of the resulting - * [longrunning.Operation][google.longrunning.Operation]: `<database>/operations/<operation_id>`. + * [longrunning.Operation][google.longrunning.Operation]: + * `<database>/operations/<operation_id>`. * * `operation_id` should be unique within the database, and must be * a valid identifier: `[a-z][a-z0-9_]*`. Note that * automatically-generated operation IDs always begin with an * underscore. If the named operation already exists, - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns - * `ALREADY_EXISTS`. + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * returns `ALREADY_EXISTS`. *
    * * string operation_id = 3; @@ -1105,18 +1111,20 @@ public java.lang.String getOperationId() { * * Specifying an explicit operation ID simplifies determining * whether the statements were executed in the event that the - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, - * or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and - * `operation_id` fields can be combined to form the + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * call is replayed, or the return value is otherwise lost: the + * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] + * and `operation_id` fields can be combined to form the * [name][google.longrunning.Operation.name] of the resulting - * [longrunning.Operation][google.longrunning.Operation]: `<database>/operations/<operation_id>`. + * [longrunning.Operation][google.longrunning.Operation]: + * `<database>/operations/<operation_id>`. * * `operation_id` should be unique within the database, and must be * a valid identifier: `[a-z][a-z0-9_]*`. Note that * automatically-generated operation IDs always begin with an * underscore. If the named operation already exists, - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns - * `ALREADY_EXISTS`. + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * returns `ALREADY_EXISTS`. *
    * * string operation_id = 3; @@ -1145,18 +1153,20 @@ public com.google.protobuf.ByteString getOperationIdBytes() { * * Specifying an explicit operation ID simplifies determining * whether the statements were executed in the event that the - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, - * or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and - * `operation_id` fields can be combined to form the + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * call is replayed, or the return value is otherwise lost: the + * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] + * and `operation_id` fields can be combined to form the * [name][google.longrunning.Operation.name] of the resulting - * [longrunning.Operation][google.longrunning.Operation]: `<database>/operations/<operation_id>`. + * [longrunning.Operation][google.longrunning.Operation]: + * `<database>/operations/<operation_id>`. * * `operation_id` should be unique within the database, and must be * a valid identifier: `[a-z][a-z0-9_]*`. Note that * automatically-generated operation IDs always begin with an * underscore. If the named operation already exists, - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns - * `ALREADY_EXISTS`. + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * returns `ALREADY_EXISTS`. *
    * * string operation_id = 3; @@ -1184,18 +1194,20 @@ public Builder setOperationId(java.lang.String value) { * * Specifying an explicit operation ID simplifies determining * whether the statements were executed in the event that the - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, - * or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and - * `operation_id` fields can be combined to form the + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * call is replayed, or the return value is otherwise lost: the + * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] + * and `operation_id` fields can be combined to form the * [name][google.longrunning.Operation.name] of the resulting - * [longrunning.Operation][google.longrunning.Operation]: `<database>/operations/<operation_id>`. + * [longrunning.Operation][google.longrunning.Operation]: + * `<database>/operations/<operation_id>`. * * `operation_id` should be unique within the database, and must be * a valid identifier: `[a-z][a-z0-9_]*`. Note that * automatically-generated operation IDs always begin with an * underscore. If the named operation already exists, - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns - * `ALREADY_EXISTS`. + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * returns `ALREADY_EXISTS`. *
    * * string operation_id = 3; @@ -1219,18 +1231,20 @@ public Builder clearOperationId() { * * Specifying an explicit operation ID simplifies determining * whether the statements were executed in the event that the - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, - * or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and - * `operation_id` fields can be combined to form the + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * call is replayed, or the return value is otherwise lost: the + * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] + * and `operation_id` fields can be combined to form the * [name][google.longrunning.Operation.name] of the resulting - * [longrunning.Operation][google.longrunning.Operation]: `<database>/operations/<operation_id>`. + * [longrunning.Operation][google.longrunning.Operation]: + * `<database>/operations/<operation_id>`. * * `operation_id` should be unique within the database, and must be * a valid identifier: `[a-z][a-z0-9_]*`. Note that * automatically-generated operation IDs always begin with an * underscore. If the named operation already exists, - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns - * `ALREADY_EXISTS`. + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * returns `ALREADY_EXISTS`. *
    * * string operation_id = 3; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlRequestOrBuilder.java index 6c0d078fda4..4e1de5b899e 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseDdlRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface UpdateDatabaseDdlRequestOrBuilder @@ -115,18 +115,20 @@ public interface UpdateDatabaseDdlRequestOrBuilder * * Specifying an explicit operation ID simplifies determining * whether the statements were executed in the event that the - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, - * or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and - * `operation_id` fields can be combined to form the + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * call is replayed, or the return value is otherwise lost: the + * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] + * and `operation_id` fields can be combined to form the * [name][google.longrunning.Operation.name] of the resulting - * [longrunning.Operation][google.longrunning.Operation]: `<database>/operations/<operation_id>`. 
+ * [longrunning.Operation][google.longrunning.Operation]: + * `<database>/operations/<operation_id>`. * * `operation_id` should be unique within the database, and must be * a valid identifier: `[a-z][a-z0-9_]*`. Note that * automatically-generated operation IDs always begin with an * underscore. If the named operation already exists, - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns - * `ALREADY_EXISTS`. + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * returns `ALREADY_EXISTS`. *
    * * string operation_id = 3; @@ -145,18 +147,20 @@ public interface UpdateDatabaseDdlRequestOrBuilder * * Specifying an explicit operation ID simplifies determining * whether the statements were executed in the event that the - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, - * or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and - * `operation_id` fields can be combined to form the + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * call is replayed, or the return value is otherwise lost: the + * [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] + * and `operation_id` fields can be combined to form the * [name][google.longrunning.Operation.name] of the resulting - * [longrunning.Operation][google.longrunning.Operation]: `<database>/operations/<operation_id>`. + * [longrunning.Operation][google.longrunning.Operation]: + * `<database>/operations/<operation_id>`. * * `operation_id` should be unique within the database, and must be * a valid identifier: `[a-z][a-z0-9_]*`. Note that * automatically-generated operation IDs always begin with an * underscore. If the named operation already exists, - * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns - * `ALREADY_EXISTS`. + * [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + * returns `ALREADY_EXISTS`. *
    * * string operation_id = 3; diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseMetadata.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseMetadata.java index 5323862fdb4..986f13bb6ee 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseMetadata.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseMetadata.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseMetadataOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseMetadataOrBuilder.java index d41b3e92aba..7d57a7fa341 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseMetadataOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseMetadataOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface UpdateDatabaseMetadataOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseRequest.java index c712da230f9..3234ecb07da 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseRequest.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; /** diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseRequestOrBuilder.java index 41bdf84d50c..38172dc67cb 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/UpdateDatabaseRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/database/v1/spanner_database_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.database.v1; public interface UpdateDatabaseRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup.proto b/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup.proto index fce69a2f3e3..f3473f4eabf 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup.proto +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup.proto @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -51,14 +51,14 @@ message Backup { READY = 2; } - // Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation. - // Name of the database from which this backup was - // created. This needs to be in the same instance as the backup. - // Values are of the form + // Required for the + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + // operation. Name of the database from which this backup was created. This + // needs to be in the same instance as the backup. Values are of the form // `projects//instances//databases/`. string database = 2 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - }]; + type: "spanner.googleapis.com/Database" + }]; // The backup will contain an externally consistent copy of the database at // the timestamp specified by `version_time`. If `version_time` is not @@ -66,7 +66,8 @@ message Backup { // backup. 
google.protobuf.Timestamp version_time = 9; - // Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + // Required for the + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] // operation. The expiration time of the backup, with microseconds // granularity that must be at least 6 hours and at most 366 days // from the time the CreateBackup request is processed. Once the `expire_time` @@ -74,8 +75,11 @@ message Backup { // Spanner to free the resources used by the backup. google.protobuf.Timestamp expire_time = 3; - // Output only for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation. - // Required for the [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] operation. + // Output only for the + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + // operation. Required for the + // [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] + // operation. // // A globally unique identifier for the backup which cannot be // changed. Values are of the form @@ -89,14 +93,34 @@ message Backup { // `projects//instances/`. string name = 1; - // Output only. The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + // Output only. The time the + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] // request is received. If the request does not specify `version_time`, the // `version_time` of the backup will be equivalent to the `create_time`. - google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp create_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Size of the backup in bytes. int64 size_bytes = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Output only. The number of bytes that will be freed by deleting this + // backup. 
This value will be zero if, for example, this backup is part of an + // incremental backup chain and younger backups in the chain require that we + // keep its data. For backups not in an incremental backup chain, this is + // always the size of the backup. This value may change if backups on the same + // chain get created, deleted or expired. + int64 freeable_size_bytes = 15 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. For a backup in an incremental backup chain, this is the + // storage space needed to keep the data that has changed since the previous + // backup. For all other backups, this is always the size of the backup. This + // value may change if backups on the same chain get deleted or expired. + // + // This field can be used to calculate the total storage space used by a set + // of backups. For example, the total space used by all backups of a database + // can be computed by summing up this field. + int64 exclusive_size_bytes = 16 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Output only. The current state of the backup. State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; @@ -115,10 +139,21 @@ message Backup { ]; // Output only. The encryption information for the backup. - EncryptionInfo encryption_info = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; + EncryptionInfo encryption_info = 8 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The encryption information for the backup, whether it is + // protected by one or more KMS keys. The information includes all Cloud + // KMS key versions used to encrypt the backup. The `encryption_status' field + // inside of each `EncryptionInfo` is not populated. At least one of the key + // versions must be available for the backup to be restored. If a key version + // is revoked in the middle of a restore, the restore behavior is undefined. + repeated EncryptionInfo encryption_information = 13 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. 
The database dialect information for the backup. - DatabaseDialect database_dialect = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; + DatabaseDialect database_dialect = 10 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The names of the destination backups being created by copying // this source backup. The backup names are of the form @@ -129,9 +164,7 @@ message Backup { // destination backup is deleted), the reference to the backup is removed. repeated string referencing_backups = 11 [ (google.api.field_behavior) = OUTPUT_ONLY, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/Backup" - } + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } ]; // Output only. The max allowed expiration time of the backup, with @@ -139,10 +172,45 @@ message Backup { // multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or // copying an existing backup, the expiration time specified must be // less than `Backup.max_expire_time`. - google.protobuf.Timestamp max_expire_time = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp max_expire_time = 12 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. List of backup schedule URIs that are associated with + // creating this backup. This is only applicable for scheduled backups, and + // is empty for on-demand backups. + // + // To optimize for storage, whenever possible, multiple schedules are + // collapsed together to create one backup. In such cases, this field captures + // the list of all backup schedule URIs that are associated with creating + // this backup. If collapsing is not done, then this field captures the + // single backup schedule URI associated with creating this backup. + repeated string backup_schedules = 14 [ + (google.api.field_behavior) = OUTPUT_ONLY, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/BackupSchedule" + } + ]; + + // Output only. 
Populated only for backups in an incremental backup chain. + // Backups share the same chain id if and only if they belong to the same + // incremental backup chain. Use this field to determine which backups are + // part of the same incremental backup chain. The ordering of backups in the + // chain can be determined by ordering the backup `version_time`. + string incremental_backup_chain_id = 17 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Data deleted at a time older than this is guaranteed not to be + // retained in order to support this backup. For a backup in an incremental + // backup chain, this is the version time of the oldest backup that exists or + // ever existed in the chain. For all other backups, this is the version time + // of the backup. This field can be used to understand what data is being + // retained by the backup system. + google.protobuf.Timestamp oldest_version_time = 18 + [(google.api.field_behavior) = OUTPUT_ONLY]; } -// The request for [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. +// The request for +// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. message CreateBackupRequest { // Required. The name of the instance in which the backup will be // created. This must be the same instance that contains the database the @@ -165,29 +233,31 @@ message CreateBackupRequest { // Required. The backup to create. Backup backup = 3 [(google.api.field_behavior) = REQUIRED]; - // Optional. The encryption configuration used to encrypt the backup. If this field is - // not specified, the backup will use the same - // encryption configuration as the database by default, namely - // [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] = - // `USE_DATABASE_ENCRYPTION`. - CreateBackupEncryptionConfig encryption_config = 4 [(google.api.field_behavior) = OPTIONAL]; + // Optional. 
The encryption configuration used to encrypt the backup. If this + // field is not specified, the backup will use the same encryption + // configuration as the database by default, namely + // [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] + // = `USE_DATABASE_ENCRYPTION`. + CreateBackupEncryptionConfig encryption_config = 4 + [(google.api.field_behavior) = OPTIONAL]; } // Metadata type for the operation returned by // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. message CreateBackupMetadata { // The name of the backup being created. - string name = 1 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Backup" - }]; + string name = 1 [ + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } + ]; // The name of the database the backup is created from. string database = 2 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - }]; + type: "spanner.googleapis.com/Database" + }]; // The progress of the - // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation. + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + // operation. OperationProgress progress = 3; // The time at which cancellation of this operation was received. @@ -205,10 +275,11 @@ message CreateBackupMetadata { google.protobuf.Timestamp cancel_time = 4; } -// The request for [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]. +// The request for +// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]. message CopyBackupRequest { - // Required. The name of the destination instance that will contain the backup copy. - // Values are of the form: `projects//instances/`. + // Required. The name of the destination instance that will contain the backup + // copy. Values are of the form: `projects//instances/`. 
string parent = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -229,9 +300,7 @@ message CopyBackupRequest { // `projects//instances//backups/`. string source_backup = 3 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/Backup" - } + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } ]; // Required. The expiration time of the backup in microsecond granularity. @@ -239,35 +308,38 @@ message CopyBackupRequest { // from the `create_time` of the source backup. Once the `expire_time` has // passed, the backup is eligible to be automatically deleted by Cloud Spanner // to free the resources used by the backup. - google.protobuf.Timestamp expire_time = 4 [(google.api.field_behavior) = REQUIRED]; - - // Optional. The encryption configuration used to encrypt the backup. If this field is - // not specified, the backup will use the same - // encryption configuration as the source backup by default, namely - // [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] = - // `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. - CopyBackupEncryptionConfig encryption_config = 5 [(google.api.field_behavior) = OPTIONAL]; + google.protobuf.Timestamp expire_time = 4 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. The encryption configuration used to encrypt the backup. If this + // field is not specified, the backup will use the same encryption + // configuration as the source backup by default, namely + // [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] + // = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. 
+ CopyBackupEncryptionConfig encryption_config = 5 + [(google.api.field_behavior) = OPTIONAL]; } -// Metadata type for the google.longrunning.Operation returned by +// Metadata type for the operation returned by // [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]. message CopyBackupMetadata { // The name of the backup being created through the copy operation. // Values are of the form // `projects//instances//backups/`. - string name = 1 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Backup" - }]; + string name = 1 [ + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } + ]; // The name of the source backup that is being copied. // Values are of the form // `projects//instances//backups/`. - string source_backup = 2 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Backup" - }]; + string source_backup = 2 [ + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } + ]; // The progress of the - // [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] operation. + // [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] + // operation. OperationProgress progress = 3; // The time at which cancellation of CopyBackup operation was received. @@ -285,7 +357,8 @@ message CopyBackupMetadata { google.protobuf.Timestamp cancel_time = 4; } -// The request for [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]. +// The request for +// [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]. message UpdateBackupRequest { // Required. The backup to update. `backup.name`, and the fields to be updated // as specified by `update_mask` are required. Other fields are ignored. @@ -298,36 +371,36 @@ message UpdateBackupRequest { // resource, not to the request message. 
The field mask must always be // specified; this prevents any future fields from being erased accidentally // by clients that do not know about them. - google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED]; + google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = REQUIRED]; } -// The request for [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup]. +// The request for +// [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup]. message GetBackupRequest { // Required. Name of the backup. // Values are of the form // `projects//instances//backups/`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/Backup" - } + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } ]; } -// The request for [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup]. +// The request for +// [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup]. message DeleteBackupRequest { // Required. Name of the backup to delete. // Values are of the form // `projects//instances//backups/`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/Backup" - } + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } ]; } -// The request for [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. +// The request for +// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. message ListBackupsRequest { // Required. The instance to list backups from. Values are of the // form `projects//instances/`. @@ -346,7 +419,9 @@ message ListBackupsRequest { // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. // Colon `:` is the contains operator. Filter rules are not case sensitive. 
// - // The following fields in the [Backup][google.spanner.admin.database.v1.Backup] are eligible for filtering: + // The following fields in the + // [Backup][google.spanner.admin.database.v1.Backup] are eligible for + // filtering: // // * `name` // * `database` @@ -355,6 +430,7 @@ message ListBackupsRequest { // * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) // * `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) // * `size_bytes` + // * `backup_schedules` // // You can combine multiple expressions by enclosing each expression in // parentheses. By default, expressions are combined with AND logic, but @@ -373,6 +449,8 @@ message ListBackupsRequest { // * `expire_time < \"2018-03-28T14:50:00Z\"` // - The backup `expire_time` is before 2018-03-28T14:50:00Z. // * `size_bytes > 10000000000` - The backup's size is greater than 10GB + // * `backup_schedules:daily` + // - The backup is created from a schedule with "daily" in its name. string filter = 2; // Number of backups to be returned in the response. If 0 or @@ -380,21 +458,23 @@ message ListBackupsRequest { int32 page_size = 3; // If non-empty, `page_token` should contain a - // [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] from a - // previous [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] to the same `parent` and with the same - // `filter`. + // [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] + // from a previous + // [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] + // to the same `parent` and with the same `filter`. string page_token = 4; } -// The response for [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. +// The response for +// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. message ListBackupsResponse { // The list of matching backups. 
Backups returned are ordered by `create_time` // in descending order, starting from the most recent `create_time`. repeated Backup backups = 1; // `next_page_token` can be sent in a subsequent - // [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] call to fetch more - // of the matching backups. + // [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] + // call to fetch more of the matching backups. string next_page_token = 2; } @@ -424,7 +504,9 @@ message ListBackupOperationsRequest { // * `name` - The name of the long-running operation // * `done` - False if the operation is in progress, else true. // * `metadata.@type` - the type of metadata. For example, the type string - // for [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] is + // for + // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + // is // `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. // * `metadata.` - any field in metadata.value. // `metadata.@type` must be specified first if filtering on metadata @@ -442,14 +524,15 @@ message ListBackupOperationsRequest { // * `done:true` - The operation is complete. // * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ // `metadata.database:prod` - Returns operations where: - // * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. - // * The database the backup was taken from has a name containing the - // string "prod". + // * The operation's metadata type is + // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + // * The source database name of backup contains the string "prod". 
// * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ // `(metadata.name:howl) AND` \ // `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ // `(error:*)` - Returns operations where: - // * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + // * The operation's metadata type is + // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. // * The backup name contains the string "howl". // * The operation started before 2018-03-28T14:50:00Z. // * The operation resulted in an error. @@ -457,9 +540,9 @@ message ListBackupOperationsRequest { // `(metadata.source_backup:test) AND` \ // `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \ // `(error:*)` - Returns operations where: - // * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. - // * The source backup of the copied backup name contains the string - // "test". + // * The operation's metadata type is + // [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + // * The source backup name contains the string "test". // * The operation started before 2022-01-18T14:50:00Z. // * The operation resulted in an error. 
// * `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \ @@ -469,12 +552,13 @@ message ListBackupOperationsRequest { // `(metadata.source_backup:test_bkp)) AND` \ // `(error:*)` - Returns operations where: // * The operation's metadata matches either of criteria: - // * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] AND the - // database the backup was taken from has name containing string + // * The operation's metadata type is + // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] + // AND the source database name of the backup contains the string // "test_db" - // * The operation's metadata type is [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] AND the - // backup the backup was copied from has name containing string - // "test_bkp" + // * The operation's metadata type is + // [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata] + // AND the source backup name contains the string "test_bkp" // * The operation resulted in an error. string filter = 2; @@ -484,8 +568,9 @@ message ListBackupOperationsRequest { // If non-empty, `page_token` should contain a // [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token] - // from a previous [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] to the - // same `parent` and with the same `filter`. + // from a previous + // [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] + // to the same `parent` and with the same `filter`. string page_token = 4; } @@ -512,25 +597,26 @@ message ListBackupOperationsResponse { // Information about a backup. message BackupInfo { // Name of the backup. 
- string backup = 1 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Backup" - }]; + string backup = 1 [ + (google.api.resource_reference) = { type: "spanner.googleapis.com/Backup" } + ]; // The backup contains an externally consistent copy of `source_database` at // the timestamp specified by `version_time`. If the - // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request did not specify - // `version_time`, the `version_time` of the backup is equivalent to the - // `create_time`. + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + // request did not specify `version_time`, the `version_time` of the backup is + // equivalent to the `create_time`. google.protobuf.Timestamp version_time = 4; - // The time the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request was - // received. + // The time the + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + // request was received. google.protobuf.Timestamp create_time = 2; // Name of the database the backup was created from. string source_database = 3 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - }]; + type: "spanner.googleapis.com/Database" + }]; } // Encryption configuration for the backup to create. @@ -542,9 +628,10 @@ message CreateBackupEncryptionConfig { // Use the same encryption configuration as the database. This is the // default option when - // [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig] is empty. - // For example, if the database is using `Customer_Managed_Encryption`, the - // backup will be using the same Cloud KMS key as the database. + // [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig] + // is empty. For example, if the database is using + // `Customer_Managed_Encryption`, the backup will be using the same Cloud + // KMS key as the database. 
USE_DATABASE_ENCRYPTION = 1; // Use Google default encryption. @@ -560,8 +647,8 @@ message CreateBackupEncryptionConfig { // Optional. The Cloud KMS key that will be used to protect the backup. // This field should be set only when - // [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] is - // `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form + // [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type] + // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form // `projects//locations//keyRings//cryptoKeys/`. string kms_key_name = 2 [ (google.api.field_behavior) = OPTIONAL, @@ -569,6 +656,28 @@ message CreateBackupEncryptionConfig { type: "cloudkms.googleapis.com/CryptoKey" } ]; + + // Optional. Specifies the KMS configuration for the one or more keys used to + // protect the backup. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. + // + // The keys referenced by kms_key_names must fully cover all + // regions of the backup's instance configuration. Some examples: + // * For single region instance configs, specify a single regional + // location KMS key. + // * For multi-regional instance configs of type GOOGLE_MANAGED, + // either specify a multi-regional location KMS key or multiple regional + // location KMS keys that cover all regions in the instance config. + // * For an instance config of type USER_MANAGED, please specify only + // regional location KMS keys to cover each region in the instance config. + // Multi-regional location KMS keys are not supported for USER_MANAGED + // instance configs. + repeated string kms_key_names = 3 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + } + ]; } // Encryption configuration for the copied backup. @@ -578,17 +687,20 @@ message CopyBackupEncryptionConfig { // Unspecified. Do not use. 
ENCRYPTION_TYPE_UNSPECIFIED = 0; - // This is the default option for [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] - // when [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig] is not specified. - // For example, if the source backup is using `Customer_Managed_Encryption`, - // the backup will be using the same Cloud KMS key as the source backup. + // This is the default option for + // [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup] + // when + // [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig] + // is not specified. For example, if the source backup is using + // `Customer_Managed_Encryption`, the backup will be using the same Cloud + // KMS key as the source backup. USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1; // Use Google default encryption. GOOGLE_DEFAULT_ENCRYPTION = 2; - // Use customer managed encryption. If specified, `kms_key_name` - // must contain a valid Cloud KMS key. + // Use customer managed encryption. If specified, either `kms_key_name` or + // `kms_key_names` must contain valid Cloud KMS key(s). CUSTOMER_MANAGED_ENCRYPTION = 3; } @@ -597,8 +709,8 @@ message CopyBackupEncryptionConfig { // Optional. The Cloud KMS key that will be used to protect the backup. // This field should be set only when - // [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] is - // `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form + // [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type] + // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form // `projects//locations//keyRings//cryptoKeys/`. string kms_key_name = 2 [ (google.api.field_behavior) = OPTIONAL, @@ -606,4 +718,40 @@ message CopyBackupEncryptionConfig { type: "cloudkms.googleapis.com/CryptoKey" } ]; + + // Optional. Specifies the KMS configuration for the one or more keys used to + // protect the backup. 
Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. + // Kms keys specified can be in any order. + // + // The keys referenced by kms_key_names must fully cover all + // regions of the backup's instance configuration. Some examples: + // * For single region instance configs, specify a single regional + // location KMS key. + // * For multi-regional instance configs of type GOOGLE_MANAGED, + // either specify a multi-regional location KMS key or multiple regional + // location KMS keys that cover all regions in the instance config. + // * For an instance config of type USER_MANAGED, please specify only + // regional location KMS keys to cover each region in the instance config. + // Multi-regional location KMS keys are not supported for USER_MANAGED + // instance configs. + repeated string kms_key_names = 3 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + } + ]; } + +// The specification for full backups. +// A full backup stores the entire contents of the database at a given +// version time. +message FullBackupSpec {} + +// The specification for incremental backup chains. +// An incremental backup stores the delta of changes between a previous +// backup and the database contents at a given version time. An +// incremental backup chain consists of a full backup and zero or more +// successive incremental backups. The first backup created for an +// incremental backup chain is always a full backup. 
+message IncrementalBackupSpec {} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup_schedule.proto b/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup_schedule.proto new file mode 100644 index 00000000000..c9b5e7e3f4b --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/backup_schedule.proto @@ -0,0 +1,230 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.spanner.admin.database.v1; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; +import "google/spanner/admin/database/v1/backup.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1"; +option go_package = "cloud.google.com/go/spanner/admin/database/apiv1/databasepb;databasepb"; +option java_multiple_files = true; +option java_outer_classname = "BackupScheduleProto"; +option java_package = "com.google.spanner.admin.database.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1"; +option ruby_package = "Google::Cloud::Spanner::Admin::Database::V1"; + +// Defines specifications of the backup schedule. +message BackupScheduleSpec { + // Required. 
+ oneof schedule_spec { + // Cron style schedule specification. + CrontabSpec cron_spec = 1; + } +} + +// BackupSchedule expresses the automated backup creation specification for a +// Spanner database. +// Next ID: 10 +message BackupSchedule { + option (google.api.resource) = { + type: "spanner.googleapis.com/BackupSchedule" + pattern: "projects/{project}/instances/{instance}/databases/{database}/backupSchedules/{schedule}" + plural: "backupSchedules" + singular: "backupSchedule" + }; + + // Identifier. Output only for the + // [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchedule] operation. + // Required for the + // [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule] + // operation. A globally unique identifier for the backup schedule which + // cannot be changed. Values are of the form + // `projects//instances//databases//backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]` + // The final segment of the name must be between 2 and 60 characters in + // length. + string name = 1 [(google.api.field_behavior) = IDENTIFIER]; + + // Optional. The schedule specification based on which the backup creations + // are triggered. + BackupScheduleSpec spec = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The retention duration of a backup that must be at least 6 hours + // and at most 366 days. The backup is eligible to be automatically deleted + // once the retention period has elapsed. + google.protobuf.Duration retention_duration = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The encryption configuration that will be used to encrypt the + // backup. If this field is not specified, the backup will use the same + // encryption configuration as the database. + CreateBackupEncryptionConfig encryption_config = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Required. Backup type spec determines the type of backup that is created by + // the backup schedule. Currently, only full backups are supported. 
+ oneof backup_type_spec { + // The schedule creates only full backups. + FullBackupSpec full_backup_spec = 7; + + // The schedule creates incremental backup chains. + IncrementalBackupSpec incremental_backup_spec = 8; + } + + // Output only. The timestamp at which the schedule was last updated. + // If the schedule has never been updated, this field contains the timestamp + // when the schedule was first created. + google.protobuf.Timestamp update_time = 9 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// CrontabSpec can be used to specify the version time and frequency at +// which the backup should be created. +message CrontabSpec { + // Required. Textual representation of the crontab. User can customize the + // backup frequency and the backup version time using the cron + // expression. The version time must be in UTC timezone. + // + // The backup will contain an externally consistent copy of the + // database at the version time. Allowed frequencies are 12 hour, 1 day, + // 1 week and 1 month. Examples of valid cron specifications: + // * `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC. + // * `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC. + // * `0 2 * * * ` : once a day at 2 past midnight in UTC. + // * `0 2 * * 0 ` : once a week every Sunday at 2 past midnight in UTC. + // * `0 2 8 * * ` : once a month on 8th day at 2 past midnight in UTC. + string text = 1 [(google.api.field_behavior) = REQUIRED]; + + // Output only. The time zone of the times in `CrontabSpec.text`. Currently + // only UTC is supported. + string time_zone = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Schedule backups will contain an externally consistent copy + // of the database at the version time specified in + // `schedule_spec.cron_spec`. However, Spanner may not initiate the creation + // of the scheduled backups at that version time. 
Spanner will initiate + // the creation of scheduled backups within the time window bounded by the + // version_time specified in `schedule_spec.cron_spec` and version_time + + // `creation_window`. + google.protobuf.Duration creation_window = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// The request for +// [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule]. +message CreateBackupScheduleRequest { + // Required. The name of the database that this backup schedule applies to. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; + + // Required. The Id to use for the backup schedule. The `backup_schedule_id` + // appended to `parent` forms the full backup schedule name of the form + // `projects//instances//databases//backupSchedules/`. + string backup_schedule_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The backup schedule to create. + BackupSchedule backup_schedule = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// The request for +// [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule]. +message GetBackupScheduleRequest { + // Required. The name of the schedule to retrieve. + // Values are of the form + // `projects//instances//databases//backupSchedules/`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/BackupSchedule" + } + ]; +} + +// The request for +// [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule]. +message DeleteBackupScheduleRequest { + // Required. The name of the schedule to delete. + // Values are of the form + // `projects//instances//databases//backupSchedules/`. 
+ string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/BackupSchedule" + } + ]; +} + +// The request for +// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]. +message ListBackupSchedulesRequest { + // Required. Database is the parent resource whose backup schedules should be + // listed. Values are of the form + // projects//instances//databases/ + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; + + // Optional. Number of backup schedules to be returned in the response. If 0 + // or less, defaults to the server's maximum allowed page size. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token] + // from a previous + // [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse] + // to the same `parent`. + string page_token = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// The response for +// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]. +message ListBackupSchedulesResponse { + // The list of backup schedules for a database. + repeated BackupSchedule backup_schedules = 1; + + // `next_page_token` can be sent in a subsequent + // [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules] + // call to fetch more of the schedules. + string next_page_token = 2; +} + +// The request for +// [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]. +message UpdateBackupScheduleRequest { + // Required. The backup schedule to update. `backup_schedule.name`, and the + // fields to be updated as specified by `update_mask` are required. 
Other + // fields are ignored. + BackupSchedule backup_schedule = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. A mask specifying which fields in the BackupSchedule resource + // should be updated. This mask is relative to the BackupSchedule resource, + // not to the request message. The field mask must always be + // specified; this prevents any future fields from being erased + // accidentally. + google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = REQUIRED]; +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/common.proto b/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/common.proto index 32d7519e3a4..a9101230637 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/common.proto +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/common.proto @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -58,8 +58,27 @@ message EncryptionConfig { // the database. Values are of the form // `projects//locations//keyRings//cryptoKeys/`. string kms_key_name = 2 [(google.api.resource_reference) = { - type: "cloudkms.googleapis.com/CryptoKey" - }]; + type: "cloudkms.googleapis.com/CryptoKey" + }]; + + // Specifies the KMS configuration for the one or more keys used to encrypt + // the database. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. + // + // The keys referenced by kms_key_names must fully cover all + // regions of the database instance configuration. Some examples: + // * For single region database instance configs, specify a single regional + // location KMS key. 
+ // * For multi-regional database instance configs of type GOOGLE_MANAGED, + // either specify a multi-regional location KMS key or multiple regional + // location KMS keys that cover all regions in the instance config. + // * For a database instance config of type USER_MANAGED, please specify only + // regional location KMS keys to cover each region in the instance config. + // Multi-regional location KMS keys are not supported for USER_MANAGED + // instance configs. + repeated string kms_key_names = 3 [(google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + }]; } // Encryption information for a Cloud Spanner database or backup. @@ -83,13 +102,14 @@ message EncryptionInfo { // Output only. The type of encryption. Type encryption_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Output only. If present, the status of a recent encrypt/decrypt call on underlying data - // for this database or backup. Regardless of status, data is always encrypted - // at rest. - google.rpc.Status encryption_status = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Output only. If present, the status of a recent encrypt/decrypt call on + // underlying data for this database or backup. Regardless of status, data is + // always encrypted at rest. + google.rpc.Status encryption_status = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; - // Output only. A Cloud KMS key version that is being used to protect the database or - // backup. + // Output only. A Cloud KMS key version that is being used to protect the + // database or backup. string kms_key_version = 2 [ (google.api.field_behavior) = OUTPUT_ONLY, (google.api.resource_reference) = { @@ -104,7 +124,7 @@ enum DatabaseDialect { // GOOGLE_STANDARD_SQL dialect. DATABASE_DIALECT_UNSPECIFIED = 0; - // Google standard SQL. + // GoogleSQL supported SQL. GOOGLE_STANDARD_SQL = 1; // PostgreSQL supported SQL. 
diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/spanner_database_admin.proto b/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/spanner_database_admin.proto index a522c08c18a..5df142403e6 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/spanner_database_admin.proto +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/spanner_database_admin.proto @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -27,6 +27,7 @@ import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; import "google/spanner/admin/database/v1/backup.proto"; +import "google/spanner/admin/database/v1/backup_schedule.proto"; import "google/spanner/admin/database/v1/common.proto"; option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1"; @@ -46,7 +47,7 @@ option (google.api.resource_definition) = { // The Cloud Spanner Database Admin API can be used to: // * create, drop, and list databases // * update the schema of pre-existing databases -// * create, delete and list backups for a database +// * create, delete, copy and list backups for a database // * restore a database from an existing backup service DatabaseAdmin { option (google.api.default_host) = "spanner.googleapis.com"; @@ -67,10 +68,11 @@ service DatabaseAdmin { // have a name of the format `/operations/` and // can be used to track preparation of the database. The // [metadata][google.longrunning.Operation.metadata] field type is - // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. 
The - // [response][google.longrunning.Operation.response] field type is + // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. + // The [response][google.longrunning.Operation.response] field type is // [Database][google.spanner.admin.database.v1.Database], if successful. - rpc CreateDatabase(CreateDatabaseRequest) returns (google.longrunning.Operation) { + rpc CreateDatabase(CreateDatabaseRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/{parent=projects/*/instances/*}/databases" body: "*" @@ -145,8 +147,10 @@ service DatabaseAdmin { // the format `/operations/` and can be used to // track execution of the schema change(s). The // [metadata][google.longrunning.Operation.metadata] field type is - // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. - rpc UpdateDatabaseDdl(UpdateDatabaseDdlRequest) returns (google.longrunning.Operation) { + // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. + // The operation has no response. + rpc UpdateDatabaseDdl(UpdateDatabaseDdlRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { patch: "/v1/{database=projects/*/instances/*/databases/*}/ddl" body: "*" @@ -187,7 +191,8 @@ service DatabaseAdmin { // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. // For backups, authorization requires `spanner.backups.setIamPolicy` // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. 
- rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) + returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy" body: "*" @@ -195,6 +200,10 @@ service DatabaseAdmin { post: "/v1/{resource=projects/*/instances/*/backups/*}:setIamPolicy" body: "*" } + additional_bindings { + post: "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:setIamPolicy" + body: "*" + } }; option (google.api.method_signature) = "resource,policy"; } @@ -207,7 +216,8 @@ service DatabaseAdmin { // [resource][google.iam.v1.GetIamPolicyRequest.resource]. // For backups, authorization requires `spanner.backups.getIamPolicy` // permission on [resource][google.iam.v1.GetIamPolicyRequest.resource]. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { + rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) + returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy" body: "*" @@ -215,6 +225,10 @@ service DatabaseAdmin { post: "/v1/{resource=projects/*/instances/*/backups/*}:getIamPolicy" body: "*" } + additional_bindings { + post: "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:getIamPolicy" + body: "*" + } }; option (google.api.method_signature) = "resource"; } @@ -229,7 +243,8 @@ service DatabaseAdmin { // Calling this method on a backup that does not exist will // result in a NOT_FOUND error if the user has // `spanner.backups.list` permission on the containing instance. 
- rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { + rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) + returns (google.iam.v1.TestIamPermissionsResponse) { option (google.api.http) = { post: "/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions" body: "*" @@ -237,6 +252,10 @@ service DatabaseAdmin { post: "/v1/{resource=projects/*/instances/*/backups/*}:testIamPermissions" body: "*" } + additional_bindings { + post: "/v1/{resource=projects/*/instances/*/databases/*/backupSchedules/*}:testIamPermissions" + body: "*" + } additional_bindings { post: "/v1/{resource=projects/*/instances/*/databases/*/databaseRoles/*}:testIamPermissions" body: "*" @@ -251,12 +270,12 @@ service DatabaseAdmin { // `projects//instances//backups//operations/` // and can be used to track creation of the backup. The // [metadata][google.longrunning.Operation.metadata] field type is - // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The - // [response][google.longrunning.Operation.response] field type is - // [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the - // creation and delete the backup. - // There can be only one pending backup creation per database. Backup creation - // of different databases can run concurrently. + // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + // The [response][google.longrunning.Operation.response] field type is + // [Backup][google.spanner.admin.database.v1.Backup], if successful. + // Cancelling the returned operation will stop the creation and delete the + // backup. There can be only one pending backup creation per database. Backup + // creation of different databases can run concurrently. 
rpc CreateBackup(CreateBackupRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/{parent=projects/*/instances/*}/backups" @@ -278,22 +297,25 @@ service DatabaseAdmin { // The [metadata][google.longrunning.Operation.metadata] field type is // [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. // The [response][google.longrunning.Operation.response] field type is - // [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the - // copying and delete the backup. - // Concurrent CopyBackup requests can run on the same source backup. + // [Backup][google.spanner.admin.database.v1.Backup], if successful. + // Cancelling the returned operation will stop the copying and delete the + // destination backup. Concurrent CopyBackup requests can run on the same + // source backup. rpc CopyBackup(CopyBackupRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/{parent=projects/*/instances/*}/backups:copy" body: "*" }; - option (google.api.method_signature) = "parent,backup_id,source_backup,expire_time"; + option (google.api.method_signature) = + "parent,backup_id,source_backup,expire_time"; option (google.longrunning.operation_info) = { response_type: "google.spanner.admin.database.v1.Backup" metadata_type: "google.spanner.admin.database.v1.CopyBackupMetadata" }; } - // Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + // Gets metadata on a pending or completed + // [Backup][google.spanner.admin.database.v1.Backup]. rpc GetBackup(GetBackupRequest) returns (Backup) { option (google.api.http) = { get: "/v1/{name=projects/*/instances/*/backups/*}" @@ -301,7 +323,8 @@ service DatabaseAdmin { option (google.api.method_signature) = "name"; } - // Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. 
+ // Updates a pending or completed + // [Backup][google.spanner.admin.database.v1.Backup]. rpc UpdateBackup(UpdateBackupRequest) returns (Backup) { option (google.api.http) = { patch: "/v1/{backup.name=projects/*/instances/*/backups/*}" @@ -310,7 +333,8 @@ service DatabaseAdmin { option (google.api.method_signature) = "backup,update_mask"; } - // Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + // Deletes a pending or completed + // [Backup][google.spanner.admin.database.v1.Backup]. rpc DeleteBackup(DeleteBackupRequest) returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v1/{name=projects/*/instances/*/backups/*}" @@ -345,7 +369,8 @@ service DatabaseAdmin { // Once the restore operation completes, a new restore operation can be // initiated, without waiting for the optimize operation associated with the // first restore to complete. - rpc RestoreDatabase(RestoreDatabaseRequest) returns (google.longrunning.Operation) { + rpc RestoreDatabase(RestoreDatabaseRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/{parent=projects/*/instances/*}/databases:restore" body: "*" @@ -365,7 +390,8 @@ service DatabaseAdmin { // `metadata.type_url` describes the type of the metadata. Operations returned // include those that have completed/failed/canceled within the last 7 days, // and pending operations. - rpc ListDatabaseOperations(ListDatabaseOperationsRequest) returns (ListDatabaseOperationsResponse) { + rpc ListDatabaseOperations(ListDatabaseOperationsRequest) + returns (ListDatabaseOperationsResponse) { option (google.api.http) = { get: "/v1/{parent=projects/*/instances/*}/databaseOperations" }; @@ -382,7 +408,8 @@ service DatabaseAdmin { // and pending operations. Operations returned are ordered by // `operation.metadata.value.progress.start_time` in descending order starting // from the most recently started operation. 
- rpc ListBackupOperations(ListBackupOperationsRequest) returns (ListBackupOperationsResponse) { + rpc ListBackupOperations(ListBackupOperationsRequest) + returns (ListBackupOperationsResponse) { option (google.api.http) = { get: "/v1/{parent=projects/*/instances/*}/backupOperations" }; @@ -390,12 +417,60 @@ service DatabaseAdmin { } // Lists Cloud Spanner database roles. - rpc ListDatabaseRoles(ListDatabaseRolesRequest) returns (ListDatabaseRolesResponse) { + rpc ListDatabaseRoles(ListDatabaseRolesRequest) + returns (ListDatabaseRolesResponse) { option (google.api.http) = { get: "/v1/{parent=projects/*/instances/*/databases/*}/databaseRoles" }; option (google.api.method_signature) = "parent"; } + + // Creates a new backup schedule. + rpc CreateBackupSchedule(CreateBackupScheduleRequest) + returns (BackupSchedule) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules" + body: "backup_schedule" + }; + option (google.api.method_signature) = + "parent,backup_schedule,backup_schedule_id"; + } + + // Gets backup schedule for the input schedule name. + rpc GetBackupSchedule(GetBackupScheduleRequest) returns (BackupSchedule) { + option (google.api.http) = { + get: "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Updates a backup schedule. + rpc UpdateBackupSchedule(UpdateBackupScheduleRequest) + returns (BackupSchedule) { + option (google.api.http) = { + patch: "/v1/{backup_schedule.name=projects/*/instances/*/databases/*/backupSchedules/*}" + body: "backup_schedule" + }; + option (google.api.method_signature) = "backup_schedule,update_mask"; + } + + // Deletes a backup schedule. 
+ rpc DeleteBackupSchedule(DeleteBackupScheduleRequest) + returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Lists all the backup schedules for the database. + rpc ListBackupSchedules(ListBackupSchedulesRequest) + returns (ListBackupSchedulesResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/instances/*/databases/*}/backupSchedules" + }; + option (google.api.method_signature) = "parent"; + } } // Information about the database restore. @@ -452,7 +527,8 @@ message Database { State state = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. If exists, the time at which the database creation started. - google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp create_time = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Applicable only for restored databases. Contains information // about the restore source. @@ -462,32 +538,37 @@ message Database { // field contains the encryption configuration for the database. // For databases that are using Google default or other types of encryption, // this field is empty. - EncryptionConfig encryption_config = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + EncryptionConfig encryption_config = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. For databases that are using customer managed encryption, this // field contains the encryption information for the database, such as - // encryption state and the Cloud KMS key versions that are in use. + // all Cloud KMS key versions that are in use. The `encryption_status' field + // inside of each `EncryptionInfo` is not populated. // // For databases that are using Google default or other types of encryption, // this field is empty. // // This field is propagated lazily from the backend. 
There might be a delay // from when a key version is being used and when it appears in this field. - repeated EncryptionInfo encryption_info = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated EncryptionInfo encryption_info = 8 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The period in which Cloud Spanner retains all versions of data // for the database. This is the same as the value of version_retention_period // database option set using - // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. Defaults to 1 hour, - // if not set. - string version_retention_period = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. + // Defaults to 1 hour, if not set. + string version_retention_period = 6 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Earliest timestamp at which older versions of the data can be // read. This value is continuously updated by Cloud Spanner and becomes stale // the moment it is queried. If you are using this value to recover data, make // sure to account for the time from the moment when the value is queried to // the moment when you initiate the recovery. - google.protobuf.Timestamp earliest_version_time = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.protobuf.Timestamp earliest_version_time = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The read-write region which contains the database's leader // replicas. @@ -498,10 +579,13 @@ message Database { string default_leader = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. The dialect of the Cloud Spanner Database. - DatabaseDialect database_dialect = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; + DatabaseDialect database_dialect = 10 + [(google.api.field_behavior) = OUTPUT_ONLY]; // Whether drop protection is enabled for this database. Defaults to false, - // if not set. + // if not set. 
For more details, please see how to [prevent accidental + // database + // deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion). bool enable_drop_protection = 11; // Output only. If true, the database is being updated. If false, there are no @@ -509,7 +593,8 @@ message Database { bool reconciling = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; } -// The request for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. +// The request for +// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. message ListDatabasesRequest { // Required. The instance whose databases should be listed. // Values are of the form `projects//instances/`. @@ -525,23 +610,26 @@ message ListDatabasesRequest { int32 page_size = 3; // If non-empty, `page_token` should contain a - // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a - // previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse]. + // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] + // from a previous + // [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse]. string page_token = 4; } -// The response for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. +// The response for +// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. message ListDatabasesResponse { // Databases that matched the request. repeated Database databases = 1; // `next_page_token` can be sent in a subsequent - // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more - // of the matching databases. + // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] + // call to fetch more of the matching databases. 
string next_page_token = 2; } -// The request for [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. +// The request for +// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. message CreateDatabaseRequest { // Required. The name of the instance that will serve the new database. // Values are of the form `projects//instances/`. @@ -565,10 +653,11 @@ message CreateDatabaseRequest { // if there is an error in any statement, the database is not created. repeated string extra_statements = 3 [(google.api.field_behavior) = OPTIONAL]; - // Optional. The encryption configuration for the database. If this field is not - // specified, Cloud Spanner will encrypt/decrypt all data at rest using + // Optional. The encryption configuration for the database. If this field is + // not specified, Cloud Spanner will encrypt/decrypt all data at rest using // Google default encryption. - EncryptionConfig encryption_config = 4 [(google.api.field_behavior) = OPTIONAL]; + EncryptionConfig encryption_config = 4 + [(google.api.field_behavior) = OPTIONAL]; // Optional. The dialect of the Cloud Spanner Database. DatabaseDialect database_dialect = 5 [(google.api.field_behavior) = OPTIONAL]; @@ -596,11 +685,12 @@ message CreateDatabaseRequest { message CreateDatabaseMetadata { // The database being created. string database = 1 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - }]; + type: "spanner.googleapis.com/Database" + }]; } -// The request for [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. +// The request for +// [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. message GetDatabaseRequest { // Required. The name of the requested database. Values are of the form // `projects//instances//databases/`. 
@@ -657,8 +747,8 @@ message UpdateDatabaseMetadata { // Each batch of statements is assigned a name which can be used with // the [Operations][google.longrunning.Operations] API to monitor // progress. See the -// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] field for more -// details. +// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] +// field for more details. message UpdateDatabaseDdlRequest { // Required. The database to update. string database = 1 [ @@ -678,18 +768,20 @@ message UpdateDatabaseDdlRequest { // // Specifying an explicit operation ID simplifies determining // whether the statements were executed in the event that the - // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, - // or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and - // `operation_id` fields can be combined to form the + // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + // call is replayed, or the return value is otherwise lost: the + // [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] + // and `operation_id` fields can be combined to form the // [name][google.longrunning.Operation.name] of the resulting - // [longrunning.Operation][google.longrunning.Operation]: `/operations/`. + // [longrunning.Operation][google.longrunning.Operation]: + // `/operations/`. // // `operation_id` should be unique within the database, and must be // a valid identifier: `[a-z][a-z0-9_]*`. Note that // automatically-generated operation IDs always begin with an // underscore. If the named operation already exists, - // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns - // `ALREADY_EXISTS`. 
+ // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] + // returns `ALREADY_EXISTS`. string operation_id = 3; // Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements. @@ -735,8 +827,8 @@ message DdlStatementActionInfo { message UpdateDatabaseDdlMetadata { // The database being modified. string database = 1 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - }]; + type: "spanner.googleapis.com/Database" + }]; // For an update this list contains all the statements. For an // individual statement, this list contains only that statement. @@ -766,7 +858,8 @@ message UpdateDatabaseDdlMetadata { repeated DdlStatementActionInfo actions = 6; } -// The request for [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. +// The request for +// [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. message DropDatabaseRequest { // Required. The database to be dropped. string database = 1 [ @@ -777,7 +870,8 @@ message DropDatabaseRequest { ]; } -// The request for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. +// The request for +// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. message GetDatabaseDdlRequest { // Required. The database whose schema we wish to get. // Values are of the form @@ -790,7 +884,8 @@ message GetDatabaseDdlRequest { ]; } -// The response for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. +// The response for +// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. message GetDatabaseDdlResponse { // A list of formatted DDL statements defining the schema of the database // specified in the request. @@ -830,7 +925,9 @@ message ListDatabaseOperationsRequest { // * `name` - The name of the long-running operation // * `done` - False if the operation is in progress, else true. 
// * `metadata.@type` - the type of metadata. For example, the type string - // for [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] is + // for + // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] + // is // `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. // * `metadata.` - any field in metadata.value. // `metadata.@type` must be specified first, if filtering on metadata @@ -852,7 +949,8 @@ message ListDatabaseOperationsRequest { // `(metadata.name:restored_howl) AND` \ // `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \ // `(error:*)` - Return operations where: - // * The operation's metadata type is [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + // * The operation's metadata type is + // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. // * The database is restored from a backup. // * The backup name contains "backup_howl". // * The restored database's name contains "restored_howl". @@ -866,8 +964,9 @@ message ListDatabaseOperationsRequest { // If non-empty, `page_token` should contain a // [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token] - // from a previous [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] to the - // same `parent` and with the same `filter`. + // from a previous + // [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] + // to the same `parent` and with the same `filter`. string page_token = 4; } @@ -913,17 +1012,18 @@ message RestoreDatabaseRequest { // Name of the backup from which to restore. Values are of the form // `projects//instances//backups/`. 
string backup = 3 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Backup" - }]; + type: "spanner.googleapis.com/Backup" + }]; } - // Optional. An encryption configuration describing the encryption type and key - // resources in Cloud KMS used to encrypt/decrypt the database to restore to. - // If this field is not specified, the restored database will use - // the same encryption configuration as the backup by default, namely - // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] = - // `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. - RestoreDatabaseEncryptionConfig encryption_config = 4 [(google.api.field_behavior) = OPTIONAL]; + // Optional. An encryption configuration describing the encryption type and + // key resources in Cloud KMS used to encrypt/decrypt the database to restore + // to. If this field is not specified, the restored database will use the same + // encryption configuration as the backup by default, namely + // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] + // = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`. + RestoreDatabaseEncryptionConfig encryption_config = 4 + [(google.api.field_behavior) = OPTIONAL]; } // Encryption configuration for the restored database. @@ -934,7 +1034,8 @@ message RestoreDatabaseEncryptionConfig { ENCRYPTION_TYPE_UNSPECIFIED = 0; // This is the default option when - // [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig] is not specified. + // [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig] + // is not specified. USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION = 1; // Use Google default encryption. @@ -948,10 +1049,10 @@ message RestoreDatabaseEncryptionConfig { // Required. The encryption type of the restored database. EncryptionType encryption_type = 1 [(google.api.field_behavior) = REQUIRED]; - // Optional. 
The Cloud KMS key that will be used to encrypt/decrypt the restored - // database. This field should be set only when - // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] is - // `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form + // Optional. The Cloud KMS key that will be used to encrypt/decrypt the + // restored database. This field should be set only when + // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type] + // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form // `projects//locations//keyRings//cryptoKeys/`. string kms_key_name = 2 [ (google.api.field_behavior) = OPTIONAL, @@ -959,6 +1060,28 @@ message RestoreDatabaseEncryptionConfig { type: "cloudkms.googleapis.com/CryptoKey" } ]; + + // Optional. Specifies the KMS configuration for the one or more keys used to + // encrypt the database. Values are of the form + // `projects//locations//keyRings//cryptoKeys/`. + // + // The keys referenced by kms_key_names must fully cover all + // regions of the database instance configuration. Some examples: + // * For single region database instance configs, specify a single regional + // location KMS key. + // * For multi-regional database instance configs of type GOOGLE_MANAGED, + // either specify a multi-regional location KMS key or multiple regional + // location KMS keys that cover all regions in the instance config. + // * For a database instance config of type USER_MANAGED, please specify only + // regional location KMS keys to cover each region in the instance config. + // Multi-regional location KMS keys are not supported for USER_MANAGED + // instance configs. 
+ repeated string kms_key_names = 3 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + } + ]; } // Metadata type for the long-running operation returned by @@ -966,14 +1089,15 @@ message RestoreDatabaseEncryptionConfig { message RestoreDatabaseMetadata { // Name of the database being created and restored to. string name = 1 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - }]; + type: "spanner.googleapis.com/Database" + }]; // The type of the restore source. RestoreSourceType source_type = 2; // Information about the source used to restore the database, as specified by - // `source` in [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest]. + // `source` in + // [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest]. oneof source_info { // Information about the backup used to restore the database. BackupInfo backup_info = 3; @@ -994,7 +1118,8 @@ message RestoreDatabaseMetadata { // operation completed despite cancellation. On successful cancellation, // the operation is not deleted; instead, it becomes an operation with // an [Operation.error][google.longrunning.Operation.error] value with a - // [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + // [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to + // `Code.CANCELLED`. google.protobuf.Timestamp cancel_time = 5; // If exists, the name of the long-running operation that will be used to @@ -1004,10 +1129,10 @@ message RestoreDatabaseMetadata { // `projects//instances//databases//operations/` // where the is the name of database being created and restored to. // The metadata type of the long-running operation is - // [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. 
This long-running operation will be - // automatically created by the system after the RestoreDatabase long-running - // operation completes successfully. This operation will not be created if the - // restore was not successful. + // [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. + // This long-running operation will be automatically created by the system + // after the RestoreDatabase long-running operation completes successfully. + // This operation will not be created if the restore was not successful. string optimize_database_operation_name = 6; } @@ -1018,8 +1143,8 @@ message RestoreDatabaseMetadata { message OptimizeRestoredDatabaseMetadata { // Name of the restored database being optimized. string name = 1 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - }]; + type: "spanner.googleapis.com/Database" + }]; // The progress of the post-restore optimizations. OperationProgress progress = 2; @@ -1042,18 +1167,17 @@ message DatabaseRole { }; // Required. The name of the database role. Values are of the form - // `projects//instances//databases//databaseRoles/ - // {role}`, where `` is as specified in the `CREATE ROLE` - // DDL statement. This name can be passed to Get/Set IAMPolicy methods to - // identify the database role. + // `projects//instances//databases//databaseRoles/` + // where `` is as specified in the `CREATE ROLE` DDL statement. string name = 1 [(google.api.field_behavior) = REQUIRED]; } -// The request for [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]. +// The request for +// [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]. message ListDatabaseRolesRequest { // Required. The database whose roles should be listed. // Values are of the form - // `projects//instances//databases//databaseRoles`. + // `projects//instances//databases/`. 
string parent = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -1066,12 +1190,14 @@ message ListDatabaseRolesRequest { int32 page_size = 2; // If non-empty, `page_token` should contain a - // [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] from a - // previous [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse]. + // [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token] + // from a previous + // [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse]. string page_token = 3; } -// The response for [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]. +// The response for +// [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]. message ListDatabaseRolesResponse { // Database roles that matched the request. repeated DatabaseRole database_roles = 1; diff --git a/proto-google-cloud-spanner-admin-instance-v1/clirr-ignored-differences.xml b/proto-google-cloud-spanner-admin-instance-v1/clirr-ignored-differences.xml index b24b9e881c1..fa9181b755b 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/clirr-ignored-differences.xml +++ b/proto-google-cloud-spanner-admin-instance-v1/clirr-ignored-differences.xml @@ -16,4 +16,66 @@ com/google/spanner/admin/instance/v1/*OrBuilder boolean has*(*) + + + + 7006 + com/google/spanner/admin/instance/v1/** + * getDefaultInstanceForType() + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * addRepeatedField(*) + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * clear() + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * clearField(*) + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * clearOneof(*) + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * clone() + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * 
mergeUnknownFields(*) + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * setField(*) + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * setRepeatedField(*) + ** + + + 7006 + com/google/spanner/admin/instance/v1/** + * setUnknownFields(*) + ** + diff --git a/proto-google-cloud-spanner-admin-instance-v1/pom.xml b/proto-google-cloud-spanner-admin-instance-v1/pom.xml index 627f9d58617..3fdc451cff5 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/pom.xml +++ b/proto-google-cloud-spanner-admin-instance-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-spanner-admin-instance-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT proto-google-cloud-spanner-admin-instance-v1 PROTO library for proto-google-cloud-spanner-admin-instance-v1 com.google.cloud google-cloud-spanner-parent - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/AutoscalingConfig.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/AutoscalingConfig.java index aad1848f575..eeb39bfd3d2 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/AutoscalingConfig.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/AutoscalingConfig.java @@ -16,14 +16,14 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** * * *
    - * Autoscaling config for an instance.
    + * Autoscaling configuration for an instance.
      * 
    * * Protobuf type {@code google.spanner.admin.instance.v1.AutoscalingConfig} @@ -38,7 +38,9 @@ private AutoscalingConfig(com.google.protobuf.GeneratedMessageV3.Builder buil super(builder); } - private AutoscalingConfig() {} + private AutoscalingConfig() { + asymmetricAutoscalingOptions_ = java.util.Collections.emptyList(); + } @java.lang.Override @SuppressWarnings({"unused"}) @@ -2081,207 +2083,2615 @@ public com.google.protobuf.Parser getParserForType() { } } - private int bitField0_; - public static final int AUTOSCALING_LIMITS_FIELD_NUMBER = 1; - private com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits - autoscalingLimits_; - /** - * - * - *
    -   * Required. Autoscaling limits for an instance.
    -   * 
    - * - * - * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return Whether the autoscalingLimits field is set. - */ - @java.lang.Override - public boolean hasAutoscalingLimits() { - return ((bitField0_ & 0x00000001) != 0); - } - /** - * - * - *
    -   * Required. Autoscaling limits for an instance.
    -   * 
    - * - * - * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return The autoscalingLimits. - */ - @java.lang.Override - public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits - getAutoscalingLimits() { - return autoscalingLimits_ == null - ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits - .getDefaultInstance() - : autoscalingLimits_; - } - /** - * - * - *
    -   * Required. Autoscaling limits for an instance.
    -   * 
    - * - * - * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; - * - */ - @java.lang.Override - public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder - getAutoscalingLimitsOrBuilder() { - return autoscalingLimits_ == null - ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits - .getDefaultInstance() - : autoscalingLimits_; - } + public interface AsymmetricAutoscalingOptionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) + com.google.protobuf.MessageOrBuilder { - public static final int AUTOSCALING_TARGETS_FIELD_NUMBER = 2; - private com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets - autoscalingTargets_; - /** - * - * - *
    -   * Required. The autoscaling targets for an instance.
    -   * 
    - * - * - * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return Whether the autoscalingTargets field is set. - */ - @java.lang.Override - public boolean hasAutoscalingTargets() { - return ((bitField0_ & 0x00000002) != 0); - } - /** - * - * - *
    -   * Required. The autoscaling targets for an instance.
    -   * 
    - * - * - * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return The autoscalingTargets. - */ - @java.lang.Override - public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets - getAutoscalingTargets() { - return autoscalingTargets_ == null - ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets - .getDefaultInstance() - : autoscalingTargets_; + /** + * + * + *
    +     * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +     * applies. Only read-only replicas are supported.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the replicaSelection field is set. + */ + boolean hasReplicaSelection(); + /** + * + * + *
    +     * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +     * applies. Only read-only replicas are supported.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The replicaSelection. + */ + com.google.spanner.admin.instance.v1.ReplicaSelection getReplicaSelection(); + /** + * + * + *
    +     * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +     * applies. Only read-only replicas are supported.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder getReplicaSelectionOrBuilder(); + + /** + * + * + *
    +     * Optional. Overrides applied to the top-level autoscaling configuration
    +     * for the selected replicas.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the overrides field is set. + */ + boolean hasOverrides(); + /** + * + * + *
    +     * Optional. Overrides applied to the top-level autoscaling configuration
    +     * for the selected replicas.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The overrides. + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + getOverrides(); + /** + * + * + *
    +     * Optional. Overrides applied to the top-level autoscaling configuration
    +     * for the selected replicas.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverridesOrBuilder + getOverridesOrBuilder(); } /** * * *
    -   * Required. The autoscaling targets for an instance.
    +   * AsymmetricAutoscalingOption specifies the scaling of replicas identified by
    +   * the given selection.
        * 
    * - * - * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; - * + * Protobuf type {@code + * google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption} */ - @java.lang.Override - public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargetsOrBuilder - getAutoscalingTargetsOrBuilder() { - return autoscalingTargets_ == null - ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets - .getDefaultInstance() - : autoscalingTargets_; - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - output.writeMessage(1, getAutoscalingLimits()); - } - if (((bitField0_ & 0x00000002) != 0)) { - output.writeMessage(2, getAutoscalingTargets()); + public static final class AsymmetricAutoscalingOption + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) + AsymmetricAutoscalingOptionOrBuilder { + private static final long serialVersionUID = 0L; + // Use AsymmetricAutoscalingOption.newBuilder() to construct. 
+ private AsymmetricAutoscalingOption(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); } - getUnknownFields().writeTo(output); - } - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; + private AsymmetricAutoscalingOption() {} - size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getAutoscalingLimits()); - } - if (((bitField0_ & 0x00000002) != 0)) { - size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getAutoscalingTargets()); + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AsymmetricAutoscalingOption(); } - size += getUnknownFields().getSerializedSize(); - memoizedSize = size; - return size; - } - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof com.google.spanner.admin.instance.v1.AutoscalingConfig)) { - return super.equals(obj); + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_descriptor; } - com.google.spanner.admin.instance.v1.AutoscalingConfig other = - (com.google.spanner.admin.instance.v1.AutoscalingConfig) obj; - if (hasAutoscalingLimits() != other.hasAutoscalingLimits()) return false; - if (hasAutoscalingLimits()) { - if (!getAutoscalingLimits().equals(other.getAutoscalingLimits())) return false; - } - if (hasAutoscalingTargets() != other.hasAutoscalingTargets()) return false; - if (hasAutoscalingTargets()) { - if (!getAutoscalingTargets().equals(other.getAutoscalingTargets())) return false; + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .class, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder.class); } - if (!getUnknownFields().equals(other.getUnknownFields())) return false; - return true; - } - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasAutoscalingLimits()) { - hash = (37 * hash) + AUTOSCALING_LIMITS_FIELD_NUMBER; - hash = (53 * hash) + getAutoscalingLimits().hashCode(); - } - if (hasAutoscalingTargets()) { - hash = (37 * hash) + AUTOSCALING_TARGETS_FIELD_NUMBER; + public interface AutoscalingConfigOverridesOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +       * Optional. If specified, overrides the min/max limit in the top-level
    +       * autoscaling configuration for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoscalingLimits field is set. + */ + boolean hasAutoscalingLimits(); + /** + * + * + *
    +       * Optional. If specified, overrides the min/max limit in the top-level
    +       * autoscaling configuration for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingLimits. + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + getAutoscalingLimits(); + /** + * + * + *
    +       * Optional. If specified, overrides the min/max limit in the top-level
    +       * autoscaling configuration for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder + getAutoscalingLimitsOrBuilder(); + + /** + * + * + *
    +       * Optional. If specified, overrides the autoscaling target
    +       * high_priority_cpu_utilization_percent in the top-level autoscaling
    +       * configuration for the selected replicas.
    +       * 
    + * + * + * int32 autoscaling_target_high_priority_cpu_utilization_percent = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingTargetHighPriorityCpuUtilizationPercent. + */ + int getAutoscalingTargetHighPriorityCpuUtilizationPercent(); + } + /** + * + * + *
    +     * Overrides the top-level autoscaling configuration for the replicas
    +     * identified by `replica_selection`. All fields in this message are
    +     * optional. Any unspecified fields will use the corresponding values from
    +     * the top-level autoscaling configuration.
    +     * 
    + * + * Protobuf type {@code + * google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides} + */ + public static final class AutoscalingConfigOverrides + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides) + AutoscalingConfigOverridesOrBuilder { + private static final long serialVersionUID = 0L; + // Use AutoscalingConfigOverrides.newBuilder() to construct. + private AutoscalingConfigOverrides( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AutoscalingConfigOverrides() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AutoscalingConfigOverrides(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.class, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.Builder.class); + } + + private int bitField0_; + public static final int AUTOSCALING_LIMITS_FIELD_NUMBER = 1; + private 
com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + autoscalingLimits_; + /** + * + * + *
    +       * Optional. If specified, overrides the min/max limit in the top-level
    +       * autoscaling configuration for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoscalingLimits field is set. + */ + @java.lang.Override + public boolean hasAutoscalingLimits() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
    +       * Optional. If specified, overrides the min/max limit in the top-level
    +       * autoscaling configuration for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingLimits. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + getAutoscalingLimits() { + return autoscalingLimits_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance() + : autoscalingLimits_; + } + /** + * + * + *
    +       * Optional. If specified, overrides the min/max limit in the top-level
    +       * autoscaling configuration for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder + getAutoscalingLimitsOrBuilder() { + return autoscalingLimits_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance() + : autoscalingLimits_; + } + + public static final int + AUTOSCALING_TARGET_HIGH_PRIORITY_CPU_UTILIZATION_PERCENT_FIELD_NUMBER = 2; + private int autoscalingTargetHighPriorityCpuUtilizationPercent_ = 0; + /** + * + * + *
    +       * Optional. If specified, overrides the autoscaling target
    +       * high_priority_cpu_utilization_percent in the top-level autoscaling
    +       * configuration for the selected replicas.
    +       * 
    + * + * + * int32 autoscaling_target_high_priority_cpu_utilization_percent = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingTargetHighPriorityCpuUtilizationPercent. + */ + @java.lang.Override + public int getAutoscalingTargetHighPriorityCpuUtilizationPercent() { + return autoscalingTargetHighPriorityCpuUtilizationPercent_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getAutoscalingLimits()); + } + if (autoscalingTargetHighPriorityCpuUtilizationPercent_ != 0) { + output.writeInt32(2, autoscalingTargetHighPriorityCpuUtilizationPercent_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(1, getAutoscalingLimits()); + } + if (autoscalingTargetHighPriorityCpuUtilizationPercent_ != 0) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 2, autoscalingTargetHighPriorityCpuUtilizationPercent_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption 
+ .AutoscalingConfigOverrides + other = + (com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides) + obj; + + if (hasAutoscalingLimits() != other.hasAutoscalingLimits()) return false; + if (hasAutoscalingLimits()) { + if (!getAutoscalingLimits().equals(other.getAutoscalingLimits())) return false; + } + if (getAutoscalingTargetHighPriorityCpuUtilizationPercent() + != other.getAutoscalingTargetHighPriorityCpuUtilizationPercent()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasAutoscalingLimits()) { + hash = (37 * hash) + AUTOSCALING_LIMITS_FIELD_NUMBER; + hash = (53 * hash) + getAutoscalingLimits().hashCode(); + } + hash = (37 * hash) + AUTOSCALING_TARGET_HIGH_PRIORITY_CPU_UTILIZATION_PERCENT_FIELD_NUMBER; + hash = (53 * hash) + getAutoscalingTargetHighPriorityCpuUtilizationPercent(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom(com.google.protobuf.ByteString data) 
+ throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +       * Overrides the top-level autoscaling configuration for the replicas
    +       * identified by `replica_selection`. All fields in this message are
    +       * optional. Any unspecified fields will use the corresponding values from
    +       * the top-level autoscaling configuration.
    +       * 
    + * + * Protobuf type {@code + * google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides) + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverridesOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.class, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.Builder.class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getAutoscalingLimitsFieldBuilder(); + } + } + 
+ @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + autoscalingLimits_ = null; + if (autoscalingLimitsBuilder_ != null) { + autoscalingLimitsBuilder_.dispose(); + autoscalingLimitsBuilder_ = null; + } + autoscalingTargetHighPriorityCpuUtilizationPercent_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + build() { + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + buildPartial() { + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + result = + new com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + 
.AutoscalingConfigOverrides + result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.autoscalingLimits_ = + autoscalingLimitsBuilder_ == null + ? autoscalingLimits_ + : autoscalingLimitsBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.autoscalingTargetHighPriorityCpuUtilizationPercent_ = + autoscalingTargetHighPriorityCpuUtilizationPercent_; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + 
com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + other) { + if (other + == com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.getDefaultInstance()) return this; + if (other.hasAutoscalingLimits()) { + mergeAutoscalingLimits(other.getAutoscalingLimits()); + } + if (other.getAutoscalingTargetHighPriorityCpuUtilizationPercent() != 0) { + setAutoscalingTargetHighPriorityCpuUtilizationPercent( + other.getAutoscalingTargetHighPriorityCpuUtilizationPercent()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + getAutoscalingLimitsFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + autoscalingTargetHighPriorityCpuUtilizationPercent_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + autoscalingLimits_; + private 
com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder> + autoscalingLimitsBuilder_; + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the autoscalingLimits field is set. + */ + public boolean hasAutoscalingLimits() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingLimits. + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + getAutoscalingLimits() { + if (autoscalingLimitsBuilder_ == null) { + return autoscalingLimits_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance() + : autoscalingLimits_; + } else { + return autoscalingLimitsBuilder_.getMessage(); + } + } + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAutoscalingLimits( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits value) { + if (autoscalingLimitsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + autoscalingLimits_ = value; + } else { + autoscalingLimitsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAutoscalingLimits( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.Builder + builderForValue) { + if (autoscalingLimitsBuilder_ == null) { + autoscalingLimits_ = builderForValue.build(); + } else { + autoscalingLimitsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeAutoscalingLimits( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits value) { + if (autoscalingLimitsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && autoscalingLimits_ != null + && autoscalingLimits_ + != com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance()) { + getAutoscalingLimitsBuilder().mergeFrom(value); + } else { + autoscalingLimits_ = value; + } + } else { + autoscalingLimitsBuilder_.mergeFrom(value); + } + if (autoscalingLimits_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearAutoscalingLimits() { + bitField0_ = (bitField0_ & ~0x00000001); + autoscalingLimits_ = null; + if (autoscalingLimitsBuilder_ != null) { + autoscalingLimitsBuilder_.dispose(); + autoscalingLimitsBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.Builder + getAutoscalingLimitsBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getAutoscalingLimitsFieldBuilder().getBuilder(); + } + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder + getAutoscalingLimitsOrBuilder() { + if (autoscalingLimitsBuilder_ != null) { + return autoscalingLimitsBuilder_.getMessageOrBuilder(); + } else { + return autoscalingLimits_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance() + : autoscalingLimits_; + } + } + /** + * + * + *
    +         * Optional. If specified, overrides the min/max limit in the top-level
    +         * autoscaling configuration for the selected replicas.
    +         * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder> + getAutoscalingLimitsFieldBuilder() { + if (autoscalingLimitsBuilder_ == null) { + autoscalingLimitsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AutoscalingLimitsOrBuilder>( + getAutoscalingLimits(), getParentForChildren(), isClean()); + autoscalingLimits_ = null; + } + return autoscalingLimitsBuilder_; + } + + private int autoscalingTargetHighPriorityCpuUtilizationPercent_; + /** + * + * + *
    +         * Optional. If specified, overrides the autoscaling target
    +         * high_priority_cpu_utilization_percent in the top-level autoscaling
    +         * configuration for the selected replicas.
    +         * 
    + * + * + * int32 autoscaling_target_high_priority_cpu_utilization_percent = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The autoscalingTargetHighPriorityCpuUtilizationPercent. + */ + @java.lang.Override + public int getAutoscalingTargetHighPriorityCpuUtilizationPercent() { + return autoscalingTargetHighPriorityCpuUtilizationPercent_; + } + /** + * + * + *
    +         * Optional. If specified, overrides the autoscaling target
    +         * high_priority_cpu_utilization_percent in the top-level autoscaling
    +         * configuration for the selected replicas.
    +         * 
    + * + * + * int32 autoscaling_target_high_priority_cpu_utilization_percent = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The autoscalingTargetHighPriorityCpuUtilizationPercent to set. + * @return This builder for chaining. + */ + public Builder setAutoscalingTargetHighPriorityCpuUtilizationPercent(int value) { + + autoscalingTargetHighPriorityCpuUtilizationPercent_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +         * Optional. If specified, overrides the autoscaling target
    +         * high_priority_cpu_utilization_percent in the top-level autoscaling
    +         * configuration for the selected replicas.
    +         * 
    + * + * + * int32 autoscaling_target_high_priority_cpu_utilization_percent = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearAutoscalingTargetHighPriorityCpuUtilizationPercent() { + bitField0_ = (bitField0_ & ~0x00000002); + autoscalingTargetHighPriorityCpuUtilizationPercent_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides) + private static final com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides(); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AutoscalingConfigOverrides parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, 
extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int REPLICA_SELECTION_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.ReplicaSelection replicaSelection_; + /** + * + * + *
    +     * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +     * applies. Only read-only replicas are supported.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the replicaSelection field is set. + */ + @java.lang.Override + public boolean hasReplicaSelection() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
    +     * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +     * applies. Only read-only replicas are supported.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The replicaSelection. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelection getReplicaSelection() { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } + /** + * + * + *
    +     * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +     * applies. Only read-only replicas are supported.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder + getReplicaSelectionOrBuilder() { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } + + public static final int OVERRIDES_FIELD_NUMBER = 2; + private com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + overrides_; + /** + * + * + *
    +     * Optional. Overrides applied to the top-level autoscaling configuration
    +     * for the selected replicas.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the overrides field is set. + */ + @java.lang.Override + public boolean hasOverrides() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + *
    +     * Optional. Overrides applied to the top-level autoscaling configuration
    +     * for the selected replicas.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The overrides. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + getOverrides() { + return overrides_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.getDefaultInstance() + : overrides_; + } + /** + * + * + *
    +     * Optional. Overrides applied to the top-level autoscaling configuration
    +     * for the selected replicas.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverridesOrBuilder + getOverridesOrBuilder() { + return overrides_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.getDefaultInstance() + : overrides_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getReplicaSelection()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getOverrides()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getReplicaSelection()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getOverrides()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption)) { + return super.equals(obj); + } + 
com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption other = + (com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) obj; + + if (hasReplicaSelection() != other.hasReplicaSelection()) return false; + if (hasReplicaSelection()) { + if (!getReplicaSelection().equals(other.getReplicaSelection())) return false; + } + if (hasOverrides() != other.hasOverrides()) return false; + if (hasOverrides()) { + if (!getOverrides().equals(other.getOverrides())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasReplicaSelection()) { + hash = (37 * hash) + REPLICA_SELECTION_FIELD_NUMBER; + hash = (53 * hash) + getReplicaSelection().hashCode(); + } + if (hasOverrides()) { + hash = (37 * hash) + OVERRIDES_FIELD_NUMBER; + hash = (53 * hash) + getOverrides().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +     * AsymmetricAutoscalingOption specifies the scaling of replicas identified by
    +     * the given selection.
    +     * 
    + * + * Protobuf type {@code + * google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .class, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder.class); + } + + // Construct using + // com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getReplicaSelectionFieldBuilder(); + getOverridesFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + replicaSelection_ = null; + if (replicaSelectionBuilder_ != null) { + 
replicaSelectionBuilder_.dispose(); + replicaSelectionBuilder_ = null; + } + overrides_ = null; + if (overridesBuilder_ != null) { + overridesBuilder_.dispose(); + overridesBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + build() { + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + buildPartial() { + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption result = + new com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption( + this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.replicaSelection_ = + replicaSelectionBuilder_ == null + ? 
replicaSelection_ + : replicaSelectionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.overrides_ = overridesBuilder_ == null ? overrides_ : overridesBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) { + return mergeFrom( + (com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + other) { + if (other + == com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .getDefaultInstance()) return this; + if (other.hasReplicaSelection()) { + mergeReplicaSelection(other.getReplicaSelection()); + } 
+ if (other.hasOverrides()) { + mergeOverrides(other.getOverrides()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + getReplicaSelectionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getOverridesFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.ReplicaSelection replicaSelection_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.instance.v1.ReplicaSelection, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder, + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder> + replicaSelectionBuilder_; + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the replicaSelection field is set. + */ + public boolean hasReplicaSelection() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The replicaSelection. + */ + public com.google.spanner.admin.instance.v1.ReplicaSelection getReplicaSelection() { + if (replicaSelectionBuilder_ == null) { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } else { + return replicaSelectionBuilder_.getMessage(); + } + } + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReplicaSelection( + com.google.spanner.admin.instance.v1.ReplicaSelection value) { + if (replicaSelectionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + replicaSelection_ = value; + } else { + replicaSelectionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReplicaSelection( + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder builderForValue) { + if (replicaSelectionBuilder_ == null) { + replicaSelection_ = builderForValue.build(); + } else { + replicaSelectionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeReplicaSelection( + com.google.spanner.admin.instance.v1.ReplicaSelection value) { + if (replicaSelectionBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && replicaSelection_ != null + && replicaSelection_ + != com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance()) { + getReplicaSelectionBuilder().mergeFrom(value); + } else { + replicaSelection_ = value; + } + } else { + replicaSelectionBuilder_.mergeFrom(value); + } + if (replicaSelection_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearReplicaSelection() { + bitField0_ = (bitField0_ & ~0x00000001); + replicaSelection_ = null; + if (replicaSelectionBuilder_ != null) { + replicaSelectionBuilder_.dispose(); + replicaSelectionBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaSelection.Builder + getReplicaSelectionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getReplicaSelectionFieldBuilder().getBuilder(); + } + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder + getReplicaSelectionOrBuilder() { + if (replicaSelectionBuilder_ != null) { + return replicaSelectionBuilder_.getMessageOrBuilder(); + } else { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } + } + /** + * + * + *
    +       * Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +       * applies. Only read-only replicas are supported.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.instance.v1.ReplicaSelection, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder, + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder> + getReplicaSelectionFieldBuilder() { + if (replicaSelectionBuilder_ == null) { + replicaSelectionBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.instance.v1.ReplicaSelection, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder, + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder>( + getReplicaSelection(), getParentForChildren(), isClean()); + replicaSelection_ = null; + } + return replicaSelectionBuilder_; + } + + private com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + overrides_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverridesOrBuilder> + overridesBuilder_; + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the overrides field is set. + */ + public boolean hasOverrides() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The overrides. + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + getOverrides() { + if (overridesBuilder_ == null) { + return overrides_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.getDefaultInstance() + : overrides_; + } else { + return overridesBuilder_.getMessage(); + } + } + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setOverrides( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + value) { + if (overridesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + overrides_ = value; + } else { + overridesBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setOverrides( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.Builder + builderForValue) { + if (overridesBuilder_ == null) { + overrides_ = builderForValue.build(); + } else { + overridesBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeOverrides( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides + value) { + if (overridesBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && overrides_ != null + && overrides_ + != com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.AutoscalingConfigOverrides + .getDefaultInstance()) { + getOverridesBuilder().mergeFrom(value); + } else { + overrides_ = value; + } + } else { + overridesBuilder_.mergeFrom(value); + } + if (overrides_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearOverrides() { + bitField0_ = (bitField0_ & ~0x00000002); + overrides_ = null; + if (overridesBuilder_ != null) { + overridesBuilder_.dispose(); + overridesBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.Builder + getOverridesBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getOverridesFieldBuilder().getBuilder(); + } + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverridesOrBuilder + getOverridesOrBuilder() { + if (overridesBuilder_ != null) { + return overridesBuilder_.getMessageOrBuilder(); + } else { + return overrides_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.getDefaultInstance() + : overrides_; + } + } + /** + * + * + *
    +       * Optional. Overrides applied to the top-level autoscaling configuration
    +       * for the selected replicas.
    +       * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.AutoscalingConfigOverrides overrides = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverridesOrBuilder> + getOverridesFieldBuilder() { + if (overridesBuilder_ == null) { + overridesBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverrides.Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .AutoscalingConfigOverridesOrBuilder>( + getOverrides(), getParentForChildren(), isClean()); + overrides_ = null; + } + return overridesBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption) + private static final com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + 
new com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption(); + } + + public static com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AsymmetricAutoscalingOption parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int bitField0_; + public static final int AUTOSCALING_LIMITS_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + autoscalingLimits_; + /** + * + * + *
    +   * Required. Autoscaling limits for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the autoscalingLimits field is set. + */ + @java.lang.Override + public boolean hasAutoscalingLimits() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
    +   * Required. Autoscaling limits for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The autoscalingLimits. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + getAutoscalingLimits() { + return autoscalingLimits_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance() + : autoscalingLimits_; + } + /** + * + * + *
    +   * Required. Autoscaling limits for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits autoscaling_limits = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimitsOrBuilder + getAutoscalingLimitsOrBuilder() { + return autoscalingLimits_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits + .getDefaultInstance() + : autoscalingLimits_; + } + + public static final int AUTOSCALING_TARGETS_FIELD_NUMBER = 2; + private com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + autoscalingTargets_; + /** + * + * + *
    +   * Required. The autoscaling targets for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the autoscalingTargets field is set. + */ + @java.lang.Override + public boolean hasAutoscalingTargets() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + *
    +   * Required. The autoscaling targets for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The autoscalingTargets. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + getAutoscalingTargets() { + return autoscalingTargets_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + .getDefaultInstance() + : autoscalingTargets_; + } + /** + * + * + *
    +   * Required. The autoscaling targets for an instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets autoscaling_targets = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargetsOrBuilder + getAutoscalingTargetsOrBuilder() { + return autoscalingTargets_ == null + ? com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets + .getDefaultInstance() + : autoscalingTargets_; + } + + public static final int ASYMMETRIC_AUTOSCALING_OPTIONS_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private java.util.List< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption> + asymmetricAutoscalingOptions_; + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption> + getAsymmetricAutoscalingOptionsList() { + return asymmetricAutoscalingOptions_; + } + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public java.util.List< + ? extends + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder> + getAsymmetricAutoscalingOptionsOrBuilderList() { + return asymmetricAutoscalingOptions_; + } + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public int getAsymmetricAutoscalingOptionsCount() { + return asymmetricAutoscalingOptions_.size(); + } + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + getAsymmetricAutoscalingOptions(int index) { + return asymmetricAutoscalingOptions_.get(index); + } + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOptionOrBuilder + getAsymmetricAutoscalingOptionsOrBuilder(int index) { + return asymmetricAutoscalingOptions_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getAutoscalingLimits()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(2, getAutoscalingTargets()); + } + for (int i = 0; i < asymmetricAutoscalingOptions_.size(); i++) { + output.writeMessage(3, asymmetricAutoscalingOptions_.get(i)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getAutoscalingLimits()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getAutoscalingTargets()); + } + for (int i = 0; i < asymmetricAutoscalingOptions_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, asymmetricAutoscalingOptions_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + 
return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.AutoscalingConfig)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.AutoscalingConfig other = + (com.google.spanner.admin.instance.v1.AutoscalingConfig) obj; + + if (hasAutoscalingLimits() != other.hasAutoscalingLimits()) return false; + if (hasAutoscalingLimits()) { + if (!getAutoscalingLimits().equals(other.getAutoscalingLimits())) return false; + } + if (hasAutoscalingTargets() != other.hasAutoscalingTargets()) return false; + if (hasAutoscalingTargets()) { + if (!getAutoscalingTargets().equals(other.getAutoscalingTargets())) return false; + } + if (!getAsymmetricAutoscalingOptionsList().equals(other.getAsymmetricAutoscalingOptionsList())) + return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasAutoscalingLimits()) { + hash = (37 * hash) + AUTOSCALING_LIMITS_FIELD_NUMBER; + hash = (53 * hash) + getAutoscalingLimits().hashCode(); + } + if (hasAutoscalingTargets()) { + hash = (37 * hash) + AUTOSCALING_TARGETS_FIELD_NUMBER; hash = (53 * hash) + getAutoscalingTargets().hashCode(); } + if (getAsymmetricAutoscalingOptionsCount() > 0) { + hash = (37 * hash) + ASYMMETRIC_AUTOSCALING_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getAsymmetricAutoscalingOptionsList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -2387,7 +4797,7 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * Autoscaling config for an instance.
    +   * Autoscaling configuration for an instance.
        * 
    * * Protobuf type {@code google.spanner.admin.instance.v1.AutoscalingConfig} @@ -2425,6 +4835,7 @@ private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { getAutoscalingLimitsFieldBuilder(); getAutoscalingTargetsFieldBuilder(); + getAsymmetricAutoscalingOptionsFieldBuilder(); } } @@ -2442,6 +4853,13 @@ public Builder clear() { autoscalingTargetsBuilder_.dispose(); autoscalingTargetsBuilder_ = null; } + if (asymmetricAutoscalingOptionsBuilder_ == null) { + asymmetricAutoscalingOptions_ = java.util.Collections.emptyList(); + } else { + asymmetricAutoscalingOptions_ = null; + asymmetricAutoscalingOptionsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -2469,11 +4887,26 @@ public com.google.spanner.admin.instance.v1.AutoscalingConfig build() { public com.google.spanner.admin.instance.v1.AutoscalingConfig buildPartial() { com.google.spanner.admin.instance.v1.AutoscalingConfig result = new com.google.spanner.admin.instance.v1.AutoscalingConfig(this); + buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } - onBuilt(); - return result; + onBuilt(); + return result; + } + + private void buildPartialRepeatedFields( + com.google.spanner.admin.instance.v1.AutoscalingConfig result) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + asymmetricAutoscalingOptions_ = + java.util.Collections.unmodifiableList(asymmetricAutoscalingOptions_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.asymmetricAutoscalingOptions_ = asymmetricAutoscalingOptions_; + } else { + result.asymmetricAutoscalingOptions_ = asymmetricAutoscalingOptionsBuilder_.build(); + } } private void buildPartial0(com.google.spanner.admin.instance.v1.AutoscalingConfig result) { @@ -2548,6 +4981,34 @@ public Builder mergeFrom(com.google.spanner.admin.instance.v1.AutoscalingConfig if (other.hasAutoscalingTargets()) { 
mergeAutoscalingTargets(other.getAutoscalingTargets()); } + if (asymmetricAutoscalingOptionsBuilder_ == null) { + if (!other.asymmetricAutoscalingOptions_.isEmpty()) { + if (asymmetricAutoscalingOptions_.isEmpty()) { + asymmetricAutoscalingOptions_ = other.asymmetricAutoscalingOptions_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.addAll(other.asymmetricAutoscalingOptions_); + } + onChanged(); + } + } else { + if (!other.asymmetricAutoscalingOptions_.isEmpty()) { + if (asymmetricAutoscalingOptionsBuilder_.isEmpty()) { + asymmetricAutoscalingOptionsBuilder_.dispose(); + asymmetricAutoscalingOptionsBuilder_ = null; + asymmetricAutoscalingOptions_ = other.asymmetricAutoscalingOptions_; + bitField0_ = (bitField0_ & ~0x00000004); + asymmetricAutoscalingOptionsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getAsymmetricAutoscalingOptionsFieldBuilder() + : null; + } else { + asymmetricAutoscalingOptionsBuilder_.addAllMessages( + other.asymmetricAutoscalingOptions_); + } + } + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -2588,6 +5049,22 @@ public Builder mergeFrom( bitField0_ |= 0x00000002; break; } // case 18 + case 26: + { + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + m = + input.readMessage( + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption.parser(), + extensionRegistry); + if (asymmetricAutoscalingOptionsBuilder_ == null) { + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.add(m); + } else { + asymmetricAutoscalingOptionsBuilder_.addMessage(m); + } + break; + } // case 26 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -3037,6 +5514,585 @@ public Builder clearAutoscalingTargets() { return autoscalingTargetsBuilder_; } + private java.util.List< + 
com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption> + asymmetricAutoscalingOptions_ = java.util.Collections.emptyList(); + + private void ensureAsymmetricAutoscalingOptionsIsMutable() { + if (!((bitField0_ & 0x00000004) != 0)) { + asymmetricAutoscalingOptions_ = + new java.util.ArrayList< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption>( + asymmetricAutoscalingOptions_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder> + asymmetricAutoscalingOptionsBuilder_; + + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption> + getAsymmetricAutoscalingOptionsList() { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(asymmetricAutoscalingOptions_); + } else { + return asymmetricAutoscalingOptionsBuilder_.getMessageList(); + } + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getAsymmetricAutoscalingOptionsCount() { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + return asymmetricAutoscalingOptions_.size(); + } else { + return asymmetricAutoscalingOptionsBuilder_.getCount(); + } + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + getAsymmetricAutoscalingOptions(int index) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + return asymmetricAutoscalingOptions_.get(index); + } else { + return asymmetricAutoscalingOptionsBuilder_.getMessage(index); + } + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAsymmetricAutoscalingOptions( + int index, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption value) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.set(index, value); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setAsymmetricAutoscalingOptions( + int index, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.Builder + builderForValue) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.set(index, builderForValue.build()); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAsymmetricAutoscalingOptions( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption value) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.add(value); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAsymmetricAutoscalingOptions( + int index, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption value) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.add(index, value); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAsymmetricAutoscalingOptions( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.Builder + builderForValue) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.add(builderForValue.build()); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAsymmetricAutoscalingOptions( + int index, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption.Builder + builderForValue) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.add(index, builderForValue.build()); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllAsymmetricAutoscalingOptions( + java.lang.Iterable< + ? extends + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOption> + values) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + ensureAsymmetricAutoscalingOptionsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, asymmetricAutoscalingOptions_); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearAsymmetricAutoscalingOptions() { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + asymmetricAutoscalingOptions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.clear(); + } + return this; + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeAsymmetricAutoscalingOptions(int index) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + ensureAsymmetricAutoscalingOptionsIsMutable(); + asymmetricAutoscalingOptions_.remove(index); + onChanged(); + } else { + asymmetricAutoscalingOptionsBuilder_.remove(index); + } + return this; + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder + getAsymmetricAutoscalingOptionsBuilder(int index) { + return getAsymmetricAutoscalingOptionsFieldBuilder().getBuilder(index); + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder + getAsymmetricAutoscalingOptionsOrBuilder(int index) { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + return asymmetricAutoscalingOptions_.get(index); + } else { + return asymmetricAutoscalingOptionsBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List< + ? extends + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder> + getAsymmetricAutoscalingOptionsOrBuilderList() { + if (asymmetricAutoscalingOptionsBuilder_ != null) { + return asymmetricAutoscalingOptionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(asymmetricAutoscalingOptions_); + } + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder + addAsymmetricAutoscalingOptionsBuilder() { + return getAsymmetricAutoscalingOptionsFieldBuilder() + .addBuilder( + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .getDefaultInstance()); + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder + addAsymmetricAutoscalingOptionsBuilder(int index) { + return getAsymmetricAutoscalingOptionsFieldBuilder() + .addBuilder( + index, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .getDefaultInstance()); + } + /** + * + * + *
    +     * Optional. Optional asymmetric autoscaling options.
    +     * Replicas matching the replica selection criteria will be autoscaled
    +     * independently from other replicas. The autoscaler will scale the replicas
    +     * based on the utilization of replicas identified by the replica selection.
    +     * Replica selections should not overlap with each other.
    +     *
    +     * Other replicas (those do not match any replica selection) will be
    +     * autoscaled together and will have the same compute capacity allocated to
    +     * them.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder> + getAsymmetricAutoscalingOptionsBuilderList() { + return getAsymmetricAutoscalingOptionsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder> + getAsymmetricAutoscalingOptionsFieldBuilder() { + if (asymmetricAutoscalingOptionsBuilder_ == null) { + asymmetricAutoscalingOptionsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption, + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + .Builder, + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder>( + asymmetricAutoscalingOptions_, + ((bitField0_ & 0x00000004) != 0), + getParentForChildren(), + isClean()); + asymmetricAutoscalingOptions_ = null; + } + return asymmetricAutoscalingOptionsBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/AutoscalingConfigOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/AutoscalingConfigOrBuilder.java index 04bb5155afe..9e9a6222286 100644 --- 
a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/AutoscalingConfigOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/AutoscalingConfigOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface AutoscalingConfigOrBuilder @@ -107,4 +107,112 @@ public interface AutoscalingConfigOrBuilder */ com.google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargetsOrBuilder getAutoscalingTargetsOrBuilder(); + + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getAsymmetricAutoscalingOptionsList(); + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption + getAsymmetricAutoscalingOptions(int index); + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getAsymmetricAutoscalingOptionsCount(); + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List< + ? extends + com.google.spanner.admin.instance.v1.AutoscalingConfig + .AsymmetricAutoscalingOptionOrBuilder> + getAsymmetricAutoscalingOptionsOrBuilderList(); + /** + * + * + *
    +   * Optional. Optional asymmetric autoscaling options.
    +   * Replicas matching the replica selection criteria will be autoscaled
    +   * independently from other replicas. The autoscaler will scale the replicas
    +   * based on the utilization of replicas identified by the replica selection.
    +   * Replica selections should not overlap with each other.
    +   *
    +   * Other replicas (those do not match any replica selection) will be
    +   * autoscaled together and will have the same compute capacity allocated to
    +   * them.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.admin.instance.v1.AutoscalingConfig.AsymmetricAutoscalingOptionOrBuilder + getAsymmetricAutoscalingOptionsOrBuilder(int index); } diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CommonProto.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CommonProto.java index e0306f96a2e..a589f16c104 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CommonProto.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CommonProto.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/common.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public final class CommonProto { @@ -32,6 +32,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_spanner_admin_instance_v1_OperationProgress_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_spanner_admin_instance_v1_OperationProgress_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_ReplicaSelection_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_ReplicaSelection_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; @@ -43,26 +47,28 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { java.lang.String[] 
descriptorData = { "\n-google/spanner/admin/instance/v1/commo" + "n.proto\022 google.spanner.admin.instance.v" - + "1\032\037google/protobuf/timestamp.proto\"\213\001\n\021O" - + "perationProgress\022\030\n\020progress_percent\030\001 \001" - + "(\005\022.\n\nstart_time\030\002 \001(\0132\032.google.protobuf" - + ".Timestamp\022,\n\010end_time\030\003 \001(\0132\032.google.pr" - + "otobuf.Timestamp*w\n\021FulfillmentPeriod\022\"\n" - + "\036FULFILLMENT_PERIOD_UNSPECIFIED\020\000\022\035\n\031FUL" - + "FILLMENT_PERIOD_NORMAL\020\001\022\037\n\033FULFILLMENT_" - + "PERIOD_EXTENDED\020\002B\375\001\n$com.google.spanner" - + ".admin.instance.v1B\013CommonProtoP\001ZFcloud" - + ".google.com/go/spanner/admin/instance/ap" - + "iv1/instancepb;instancepb\252\002&Google.Cloud" - + ".Spanner.Admin.Instance.V1\312\002&Google\\Clou" - + "d\\Spanner\\Admin\\Instance\\V1\352\002+Google::Cl" - + "oud::Spanner::Admin::Instance::V1b\006proto" - + "3" + + "1\032\037google/api/field_behavior.proto\032\037goog" + + "le/protobuf/timestamp.proto\"\213\001\n\021Operatio" + + "nProgress\022\030\n\020progress_percent\030\001 \001(\005\022.\n\ns" + + "tart_time\030\002 \001(\0132\032.google.protobuf.Timest" + + "amp\022,\n\010end_time\030\003 \001(\0132\032.google.protobuf." 
+ + "Timestamp\")\n\020ReplicaSelection\022\025\n\010locatio" + + "n\030\001 \001(\tB\003\340A\002*w\n\021FulfillmentPeriod\022\"\n\036FUL" + + "FILLMENT_PERIOD_UNSPECIFIED\020\000\022\035\n\031FULFILL" + + "MENT_PERIOD_NORMAL\020\001\022\037\n\033FULFILLMENT_PERI" + + "OD_EXTENDED\020\002B\375\001\n$com.google.spanner.adm" + + "in.instance.v1B\013CommonProtoP\001ZFcloud.goo" + + "gle.com/go/spanner/admin/instance/apiv1/" + + "instancepb;instancepb\252\002&Google.Cloud.Spa" + + "nner.Admin.Instance.V1\312\002&Google\\Cloud\\Sp" + + "anner\\Admin\\Instance\\V1\352\002+Google::Cloud:" + + ":Spanner::Admin::Instance::V1b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), com.google.protobuf.TimestampProto.getDescriptor(), }); internal_static_google_spanner_admin_instance_v1_OperationProgress_descriptor = @@ -73,6 +79,20 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "ProgressPercent", "StartTime", "EndTime", }); + internal_static_google_spanner_admin_instance_v1_ReplicaSelection_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_spanner_admin_instance_v1_ReplicaSelection_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_ReplicaSelection_descriptor, + new java.lang.String[] { + "Location", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.FieldBehaviorProto.getDescriptor(); com.google.protobuf.TimestampProto.getDescriptor(); } diff --git 
a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigMetadata.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigMetadata.java index 00c08ba9796..938fef5304c 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigMetadata.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigMetadata.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** @@ -69,7 +69,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
    -   * The target instance config end state.
    +   * The target instance configuration end state.
        * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -84,7 +84,7 @@ public boolean hasInstanceConfig() { * * *
    -   * The target instance config end state.
    +   * The target instance configuration end state.
        * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -101,7 +101,7 @@ public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() { * * *
    -   * The target instance config end state.
    +   * The target instance configuration end state.
        * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -661,7 +661,7 @@ public Builder mergeFrom( * * *
    -     * The target instance config end state.
    +     * The target instance configuration end state.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -675,7 +675,7 @@ public boolean hasInstanceConfig() { * * *
    -     * The target instance config end state.
    +     * The target instance configuration end state.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -695,7 +695,7 @@ public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() { * * *
    -     * The target instance config end state.
    +     * The target instance configuration end state.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -717,7 +717,7 @@ public Builder setInstanceConfig(com.google.spanner.admin.instance.v1.InstanceCo * * *
    -     * The target instance config end state.
    +     * The target instance configuration end state.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -737,7 +737,7 @@ public Builder setInstanceConfig( * * *
    -     * The target instance config end state.
    +     * The target instance configuration end state.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -765,7 +765,7 @@ public Builder mergeInstanceConfig(com.google.spanner.admin.instance.v1.Instance * * *
    -     * The target instance config end state.
    +     * The target instance configuration end state.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -784,7 +784,7 @@ public Builder clearInstanceConfig() { * * *
    -     * The target instance config end state.
    +     * The target instance configuration end state.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -798,7 +798,7 @@ public com.google.spanner.admin.instance.v1.InstanceConfig.Builder getInstanceCo * * *
    -     * The target instance config end state.
    +     * The target instance configuration end state.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -817,7 +817,7 @@ public com.google.spanner.admin.instance.v1.InstanceConfig.Builder getInstanceCo * * *
    -     * The target instance config end state.
    +     * The target instance configuration end state.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigMetadataOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigMetadataOrBuilder.java index 28412d72a6a..684e4c7aba0 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigMetadataOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigMetadataOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface CreateInstanceConfigMetadataOrBuilder @@ -28,7 +28,7 @@ public interface CreateInstanceConfigMetadataOrBuilder * * *
    -   * The target instance config end state.
    +   * The target instance configuration end state.
        * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -40,7 +40,7 @@ public interface CreateInstanceConfigMetadataOrBuilder * * *
    -   * The target instance config end state.
    +   * The target instance configuration end state.
        * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -52,7 +52,7 @@ public interface CreateInstanceConfigMetadataOrBuilder * * *
    -   * The target instance config end state.
    +   * The target instance configuration end state.
        * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigRequest.java index 36c8ca345e4..72a0f24207f 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigRequest.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** @@ -74,8 +74,8 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
    -   * Required. The name of the project in which to create the instance config.
    -   * Values are of the form `projects/<project>`.
    +   * Required. The name of the project in which to create the instance
    +   * configuration. Values are of the form `projects/<project>`.
        * 
    * * @@ -100,8 +100,8 @@ public java.lang.String getParent() { * * *
    -   * Required. The name of the project in which to create the instance config.
    -   * Values are of the form `projects/<project>`.
    +   * Required. The name of the project in which to create the instance
    +   * configuration. Values are of the form `projects/<project>`.
        * 
    * * @@ -131,10 +131,10 @@ public com.google.protobuf.ByteString getParentBytes() { * * *
    -   * Required. The ID of the instance config to create.  Valid identifiers are
    -   * of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +   * Required. The ID of the instance configuration to create. Valid identifiers
    +   * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
        * characters in length. The `custom-` prefix is required to avoid name
    -   * conflicts with Google managed configurations.
    +   * conflicts with Google-managed configurations.
        * 
    * * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; @@ -157,10 +157,10 @@ public java.lang.String getInstanceConfigId() { * * *
    -   * Required. The ID of the instance config to create.  Valid identifiers are
    -   * of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +   * Required. The ID of the instance configuration to create. Valid identifiers
    +   * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
        * characters in length. The `custom-` prefix is required to avoid name
    -   * conflicts with Google managed configurations.
    +   * conflicts with Google-managed configurations.
        * 
    * * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; @@ -710,8 +710,8 @@ public Builder mergeFrom( * * *
    -     * Required. The name of the project in which to create the instance config.
    -     * Values are of the form `projects/<project>`.
    +     * Required. The name of the project in which to create the instance
    +     * configuration. Values are of the form `projects/<project>`.
          * 
    * * @@ -735,8 +735,8 @@ public java.lang.String getParent() { * * *
    -     * Required. The name of the project in which to create the instance config.
    -     * Values are of the form `projects/<project>`.
    +     * Required. The name of the project in which to create the instance
    +     * configuration. Values are of the form `projects/<project>`.
          * 
    * * @@ -760,8 +760,8 @@ public com.google.protobuf.ByteString getParentBytes() { * * *
    -     * Required. The name of the project in which to create the instance config.
    -     * Values are of the form `projects/<project>`.
    +     * Required. The name of the project in which to create the instance
    +     * configuration. Values are of the form `projects/<project>`.
          * 
    * * @@ -784,8 +784,8 @@ public Builder setParent(java.lang.String value) { * * *
    -     * Required. The name of the project in which to create the instance config.
    -     * Values are of the form `projects/<project>`.
    +     * Required. The name of the project in which to create the instance
    +     * configuration. Values are of the form `projects/<project>`.
          * 
    * * @@ -804,8 +804,8 @@ public Builder clearParent() { * * *
    -     * Required. The name of the project in which to create the instance config.
    -     * Values are of the form `projects/<project>`.
    +     * Required. The name of the project in which to create the instance
    +     * configuration. Values are of the form `projects/<project>`.
          * 
    * * @@ -831,10 +831,10 @@ public Builder setParentBytes(com.google.protobuf.ByteString value) { * * *
    -     * Required. The ID of the instance config to create.  Valid identifiers are
    -     * of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +     * Required. The ID of the instance configuration to create. Valid identifiers
    +     * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
          * characters in length. The `custom-` prefix is required to avoid name
    -     * conflicts with Google managed configurations.
    +     * conflicts with Google-managed configurations.
          * 
    * * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; @@ -856,10 +856,10 @@ public java.lang.String getInstanceConfigId() { * * *
    -     * Required. The ID of the instance config to create.  Valid identifiers are
    -     * of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +     * Required. The ID of the instance configuration to create. Valid identifiers
    +     * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
          * characters in length. The `custom-` prefix is required to avoid name
    -     * conflicts with Google managed configurations.
    +     * conflicts with Google-managed configurations.
          * 
    * * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; @@ -881,10 +881,10 @@ public com.google.protobuf.ByteString getInstanceConfigIdBytes() { * * *
    -     * Required. The ID of the instance config to create.  Valid identifiers are
    -     * of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +     * Required. The ID of the instance configuration to create. Valid identifiers
    +     * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
          * characters in length. The `custom-` prefix is required to avoid name
    -     * conflicts with Google managed configurations.
    +     * conflicts with Google-managed configurations.
          * 
    * * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; @@ -905,10 +905,10 @@ public Builder setInstanceConfigId(java.lang.String value) { * * *
    -     * Required. The ID of the instance config to create.  Valid identifiers are
    -     * of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +     * Required. The ID of the instance configuration to create. Valid identifiers
    +     * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
          * characters in length. The `custom-` prefix is required to avoid name
    -     * conflicts with Google managed configurations.
    +     * conflicts with Google-managed configurations.
          * 
    * * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; @@ -925,10 +925,10 @@ public Builder clearInstanceConfigId() { * * *
    -     * Required. The ID of the instance config to create.  Valid identifiers are
    -     * of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +     * Required. The ID of the instance configuration to create. Valid identifiers
    +     * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
          * characters in length. The `custom-` prefix is required to avoid name
    -     * conflicts with Google managed configurations.
    +     * conflicts with Google-managed configurations.
          * 
    * * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigRequestOrBuilder.java index 364c98b73c4..60e1ef481e4 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceConfigRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface CreateInstanceConfigRequestOrBuilder @@ -28,8 +28,8 @@ public interface CreateInstanceConfigRequestOrBuilder * * *
    -   * Required. The name of the project in which to create the instance config.
    -   * Values are of the form `projects/<project>`.
    +   * Required. The name of the project in which to create the instance
    +   * configuration. Values are of the form `projects/<project>`.
        * 
    * * @@ -43,8 +43,8 @@ public interface CreateInstanceConfigRequestOrBuilder * * *
    -   * Required. The name of the project in which to create the instance config.
    -   * Values are of the form `projects/<project>`.
    +   * Required. The name of the project in which to create the instance
    +   * configuration. Values are of the form `projects/<project>`.
        * 
    * * @@ -59,10 +59,10 @@ public interface CreateInstanceConfigRequestOrBuilder * * *
    -   * Required. The ID of the instance config to create.  Valid identifiers are
    -   * of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +   * Required. The ID of the instance configuration to create. Valid identifiers
    +   * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
        * characters in length. The `custom-` prefix is required to avoid name
    -   * conflicts with Google managed configurations.
    +   * conflicts with Google-managed configurations.
        * 
    * * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; @@ -74,10 +74,10 @@ public interface CreateInstanceConfigRequestOrBuilder * * *
    -   * Required. The ID of the instance config to create.  Valid identifiers are
    -   * of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +   * Required. The ID of the instance configuration to create. Valid identifiers
    +   * are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
        * characters in length. The `custom-` prefix is required to avoid name
    -   * conflicts with Google managed configurations.
    +   * conflicts with Google-managed configurations.
        * 
    * * string instance_config_id = 2 [(.google.api.field_behavior) = REQUIRED]; diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceMetadata.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceMetadata.java index 64a7fdf1a1b..cafc5e39776 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceMetadata.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceMetadata.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceMetadataOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceMetadataOrBuilder.java index 2959bdc10b9..e6289c75459 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceMetadataOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceMetadataOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface CreateInstanceMetadataOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionMetadata.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionMetadata.java index db6b1ba2d3f..80b065a3675 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionMetadata.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionMetadata.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionMetadataOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionMetadataOrBuilder.java index 6ec2cb703d6..9d6e7f2f6c3 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionMetadataOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionMetadataOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface CreateInstancePartitionMetadataOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionRequest.java index 2e28b053b4d..fca00c08dee 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionRequest.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionRequestOrBuilder.java index f2a62fd61c9..777de5f7e19 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstancePartitionRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface CreateInstancePartitionRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceRequest.java index 53c0d5bfcd7..8ced295fb30 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceRequest.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceRequestOrBuilder.java index 86120114879..7597e0d3647 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/CreateInstanceRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface CreateInstanceRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceConfigRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceConfigRequest.java index 74669a5c854..c62e5c1a242 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceConfigRequest.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceConfigRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** @@ -133,12 +133,12 @@ public com.google.protobuf.ByteString getNameBytes() { * *
        * Used for optimistic concurrency control as a way to help prevent
    -   * simultaneous deletes of an instance config from overwriting each
    +   * simultaneous deletes of an instance configuration from overwriting each
        * other. If not empty, the API
    -   * only deletes the instance config when the etag provided matches the current
    -   * status of the requested instance config. Otherwise, deletes the instance
    -   * config without checking the current status of the requested instance
    -   * config.
    +   * only deletes the instance configuration when the etag provided matches the
    +   * current status of the requested instance configuration. Otherwise, deletes
    +   * the instance configuration without checking the current status of the
    +   * requested instance configuration.
        * 
    * * string etag = 2; @@ -162,12 +162,12 @@ public java.lang.String getEtag() { * *
        * Used for optimistic concurrency control as a way to help prevent
    -   * simultaneous deletes of an instance config from overwriting each
    +   * simultaneous deletes of an instance configuration from overwriting each
        * other. If not empty, the API
    -   * only deletes the instance config when the etag provided matches the current
    -   * status of the requested instance config. Otherwise, deletes the instance
    -   * config without checking the current status of the requested instance
    -   * config.
    +   * only deletes the instance configuration when the etag provided matches the
    +   * current status of the requested instance configuration. Otherwise, deletes
    +   * the instance configuration without checking the current status of the
    +   * requested instance configuration.
        * 
    * * string etag = 2; @@ -732,12 +732,12 @@ public Builder setNameBytes(com.google.protobuf.ByteString value) { * *
          * Used for optimistic concurrency control as a way to help prevent
    -     * simultaneous deletes of an instance config from overwriting each
    +     * simultaneous deletes of an instance configuration from overwriting each
          * other. If not empty, the API
    -     * only deletes the instance config when the etag provided matches the current
    -     * status of the requested instance config. Otherwise, deletes the instance
    -     * config without checking the current status of the requested instance
    -     * config.
    +     * only deletes the instance configuration when the etag provided matches the
    +     * current status of the requested instance configuration. Otherwise, deletes
    +     * the instance configuration without checking the current status of the
    +     * requested instance configuration.
          * 
    * * string etag = 2; @@ -760,12 +760,12 @@ public java.lang.String getEtag() { * *
          * Used for optimistic concurrency control as a way to help prevent
    -     * simultaneous deletes of an instance config from overwriting each
    +     * simultaneous deletes of an instance configuration from overwriting each
          * other. If not empty, the API
    -     * only deletes the instance config when the etag provided matches the current
    -     * status of the requested instance config. Otherwise, deletes the instance
    -     * config without checking the current status of the requested instance
    -     * config.
    +     * only deletes the instance configuration when the etag provided matches the
    +     * current status of the requested instance configuration. Otherwise, deletes
    +     * the instance configuration without checking the current status of the
    +     * requested instance configuration.
          * 
    * * string etag = 2; @@ -788,12 +788,12 @@ public com.google.protobuf.ByteString getEtagBytes() { * *
          * Used for optimistic concurrency control as a way to help prevent
    -     * simultaneous deletes of an instance config from overwriting each
    +     * simultaneous deletes of an instance configuration from overwriting each
          * other. If not empty, the API
    -     * only deletes the instance config when the etag provided matches the current
    -     * status of the requested instance config. Otherwise, deletes the instance
    -     * config without checking the current status of the requested instance
    -     * config.
    +     * only deletes the instance configuration when the etag provided matches the
    +     * current status of the requested instance configuration. Otherwise, deletes
    +     * the instance configuration without checking the current status of the
    +     * requested instance configuration.
          * 
    * * string etag = 2; @@ -815,12 +815,12 @@ public Builder setEtag(java.lang.String value) { * *
          * Used for optimistic concurrency control as a way to help prevent
    -     * simultaneous deletes of an instance config from overwriting each
    +     * simultaneous deletes of an instance configuration from overwriting each
          * other. If not empty, the API
    -     * only deletes the instance config when the etag provided matches the current
    -     * status of the requested instance config. Otherwise, deletes the instance
    -     * config without checking the current status of the requested instance
    -     * config.
    +     * only deletes the instance configuration when the etag provided matches the
    +     * current status of the requested instance configuration. Otherwise, deletes
    +     * the instance configuration without checking the current status of the
    +     * requested instance configuration.
          * 
    * * string etag = 2; @@ -838,12 +838,12 @@ public Builder clearEtag() { * *
          * Used for optimistic concurrency control as a way to help prevent
    -     * simultaneous deletes of an instance config from overwriting each
    +     * simultaneous deletes of an instance configuration from overwriting each
          * other. If not empty, the API
    -     * only deletes the instance config when the etag provided matches the current
    -     * status of the requested instance config. Otherwise, deletes the instance
    -     * config without checking the current status of the requested instance
    -     * config.
    +     * only deletes the instance configuration when the etag provided matches the
    +     * current status of the requested instance configuration. Otherwise, deletes
    +     * the instance configuration without checking the current status of the
    +     * requested instance configuration.
          * 
    * * string etag = 2; diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceConfigRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceConfigRequestOrBuilder.java index b33735cc031..1d45a407835 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceConfigRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceConfigRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface DeleteInstanceConfigRequestOrBuilder @@ -62,12 +62,12 @@ public interface DeleteInstanceConfigRequestOrBuilder * *
        * Used for optimistic concurrency control as a way to help prevent
    -   * simultaneous deletes of an instance config from overwriting each
    +   * simultaneous deletes of an instance configuration from overwriting each
        * other. If not empty, the API
    -   * only deletes the instance config when the etag provided matches the current
    -   * status of the requested instance config. Otherwise, deletes the instance
    -   * config without checking the current status of the requested instance
    -   * config.
    +   * only deletes the instance configuration when the etag provided matches the
    +   * current status of the requested instance configuration. Otherwise, deletes
    +   * the instance configuration without checking the current status of the
    +   * requested instance configuration.
        * 
    * * string etag = 2; @@ -80,12 +80,12 @@ public interface DeleteInstanceConfigRequestOrBuilder * *
        * Used for optimistic concurrency control as a way to help prevent
    -   * simultaneous deletes of an instance config from overwriting each
    +   * simultaneous deletes of an instance configuration from overwriting each
        * other. If not empty, the API
    -   * only deletes the instance config when the etag provided matches the current
    -   * status of the requested instance config. Otherwise, deletes the instance
    -   * config without checking the current status of the requested instance
    -   * config.
    +   * only deletes the instance configuration when the etag provided matches the
    +   * current status of the requested instance configuration. Otherwise, deletes
    +   * the instance configuration without checking the current status of the
    +   * requested instance configuration.
        * 
    * * string etag = 2; diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstancePartitionRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstancePartitionRequest.java index 82386505414..6ef1d4e07c4 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstancePartitionRequest.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstancePartitionRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstancePartitionRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstancePartitionRequestOrBuilder.java index 62a0f71e13e..8213609c027 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstancePartitionRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstancePartitionRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface DeleteInstancePartitionRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceRequest.java index f5bf320cf6b..dc0a5e4f89e 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceRequest.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceRequestOrBuilder.java index 27c43ab6c1b..6975e3794d0 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/DeleteInstanceRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface DeleteInstanceRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/FulfillmentPeriod.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/FulfillmentPeriod.java index 92d5f2aa30b..191417eaa29 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/FulfillmentPeriod.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/FulfillmentPeriod.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/common.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceConfigRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceConfigRequest.java index 204b648031f..4443c12e547 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceConfigRequest.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceConfigRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceConfigRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceConfigRequestOrBuilder.java index d5773edcbd8..67256df4aee 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceConfigRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceConfigRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface GetInstanceConfigRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstancePartitionRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstancePartitionRequest.java index fda9c8e5d12..e83cf6b6ee0 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstancePartitionRequest.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstancePartitionRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstancePartitionRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstancePartitionRequestOrBuilder.java index 9f4adeeac7e..ddaecc1fcdd 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstancePartitionRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstancePartitionRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface GetInstancePartitionRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceRequest.java index a077bb80513..8556c16b633 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceRequest.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceRequestOrBuilder.java index 56906b93f88..4daf800fd01 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/GetInstanceRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface GetInstanceRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/Instance.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/Instance.java index 03f4081280b..1caf9592536 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/Instance.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/Instance.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** @@ -42,8 +42,10 @@ private Instance() { name_ = ""; config_ = ""; displayName_ = ""; + replicaComputeCapacity_ = java.util.Collections.emptyList(); state_ = 0; endpointUris_ = com.google.protobuf.LazyStringArrayList.emptyList(); + edition_ = 0; } @java.lang.Override @@ -242,6 +244,186 @@ private State(int value) { // @@protoc_insertion_point(enum_scope:google.spanner.admin.instance.v1.Instance.State) } + /** + * + * + *
    +   * The edition selected for this instance. Different editions provide
    +   * different capabilities at different price points.
    +   * 
    + * + * Protobuf enum {@code google.spanner.admin.instance.v1.Instance.Edition} + */ + public enum Edition implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Edition not specified.
    +     * 
    + * + * EDITION_UNSPECIFIED = 0; + */ + EDITION_UNSPECIFIED(0), + /** + * + * + *
    +     * Standard edition.
    +     * 
    + * + * STANDARD = 1; + */ + STANDARD(1), + /** + * + * + *
    +     * Enterprise edition.
    +     * 
    + * + * ENTERPRISE = 2; + */ + ENTERPRISE(2), + /** + * + * + *
    +     * Enterprise Plus edition.
    +     * 
    + * + * ENTERPRISE_PLUS = 3; + */ + ENTERPRISE_PLUS(3), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
    +     * Edition not specified.
    +     * 
    + * + * EDITION_UNSPECIFIED = 0; + */ + public static final int EDITION_UNSPECIFIED_VALUE = 0; + /** + * + * + *
    +     * Standard edition.
    +     * 
    + * + * STANDARD = 1; + */ + public static final int STANDARD_VALUE = 1; + /** + * + * + *
    +     * Enterprise edition.
    +     * 
    + * + * ENTERPRISE = 2; + */ + public static final int ENTERPRISE_VALUE = 2; + /** + * + * + *
    +     * Enterprise Plus edition.
    +     * 
    + * + * ENTERPRISE_PLUS = 3; + */ + public static final int ENTERPRISE_PLUS_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Edition valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Edition forNumber(int value) { + switch (value) { + case 0: + return EDITION_UNSPECIFIED; + case 1: + return STANDARD; + case 2: + return ENTERPRISE; + case 3: + return ENTERPRISE_PLUS; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Edition findValueByNumber(int number) { + return Edition.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.Instance.getDescriptor().getEnumTypes().get(1); + } + + private static final Edition[] VALUES = values(); + + public static Edition 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Edition(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.admin.instance.v1.Instance.Edition) + } + private int bitField0_; public static final int NAME_FIELD_NUMBER = 1; @@ -420,18 +602,25 @@ public com.google.protobuf.ByteString getDisplayNameBytes() { * * *
    -   * The number of nodes allocated to this instance. At most one of either
    -   * node_count or processing_units should be present in the message.
    +   * The number of nodes allocated to this instance. At most, one of either
    +   * `node_count` or `processing_units` should be present in the message.
        *
    -   * Users can set the node_count field to specify the target number of nodes
    +   * Users can set the `node_count` field to specify the target number of nodes
        * allocated to the instance.
        *
    -   * This may be zero in API responses for instances that are not yet in state
    -   * `READY`.
    +   * If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY`
    +   * field and reflects the current number of nodes allocated to the instance.
    +   *
    +   * This might be zero in API responses for instances that are not yet in the
    +   * `READY` state.
        *
    -   * See [the
    -   * documentation](https://cloud.google.com/spanner/docs/compute-capacity)
    -   * for more information about nodes and processing units.
    +   * If the instance has varying node count across replicas (achieved by
    +   * setting asymmetric_autoscaling_options in autoscaling config), the
    +   * node_count here is the maximum node count across all replicas.
    +   *
    +   * For more information, see
    +   * [Compute capacity, nodes, and processing
    +   * units](https://cloud.google.com/spanner/docs/compute-capacity).
        * 
    * * int32 node_count = 5; @@ -449,18 +638,27 @@ public int getNodeCount() { * * *
    -   * The number of processing units allocated to this instance. At most one of
    -   * processing_units or node_count should be present in the message.
    +   * The number of processing units allocated to this instance. At most, one of
    +   * either `processing_units` or `node_count` should be present in the message.
        *
    -   * Users can set the processing_units field to specify the target number of
    +   * Users can set the `processing_units` field to specify the target number of
        * processing units allocated to the instance.
        *
    -   * This may be zero in API responses for instances that are not yet in state
    -   * `READY`.
    +   * If autoscaling is enabled, `processing_units` is treated as an
    +   * `OUTPUT_ONLY` field and reflects the current number of processing units
    +   * allocated to the instance.
    +   *
    +   * This might be zero in API responses for instances that are not yet in the
    +   * `READY` state.
    +   *
    +   * If the instance has varying processing units per replica
    +   * (achieved by setting asymmetric_autoscaling_options in autoscaling config),
    +   * the processing_units here is the maximum processing units across all
    +   * replicas.
        *
    -   * See [the
    -   * documentation](https://cloud.google.com/spanner/docs/compute-capacity)
    -   * for more information about nodes and processing units.
    +   * For more information, see
    +   * [Compute capacity, nodes and processing
    +   * units](https://cloud.google.com/spanner/docs/compute-capacity).
        * 
    * * int32 processing_units = 9; @@ -472,6 +670,102 @@ public int getProcessingUnits() { return processingUnits_; } + public static final int REPLICA_COMPUTE_CAPACITY_FIELD_NUMBER = 19; + + @SuppressWarnings("serial") + private java.util.List + replicaComputeCapacity_; + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List + getReplicaComputeCapacityList() { + return replicaComputeCapacity_; + } + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public java.util.List< + ? extends com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder> + getReplicaComputeCapacityOrBuilderList() { + return replicaComputeCapacity_; + } + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public int getReplicaComputeCapacityCount() { + return replicaComputeCapacity_.size(); + } + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity getReplicaComputeCapacity( + int index) { + return replicaComputeCapacity_.get(index); + } + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder + getReplicaComputeCapacityOrBuilder(int index) { + return replicaComputeCapacity_.get(index); + } + public static final int AUTOSCALING_CONFIG_FIELD_NUMBER = 17; private com.google.spanner.admin.instance.v1.AutoscalingConfig autoscalingConfig_; /** @@ -931,6 +1225,47 @@ public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { return updateTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : updateTime_; } + public static final int EDITION_FIELD_NUMBER = 20; + private int edition_ = 0; + /** + * + * + *
    +   * Optional. The `Edition` of the current instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for edition. + */ + @java.lang.Override + public int getEditionValue() { + return edition_; + } + /** + * + * + *
    +   * Optional. The `Edition` of the current instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The edition. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance.Edition getEdition() { + com.google.spanner.admin.instance.v1.Instance.Edition result = + com.google.spanner.admin.instance.v1.Instance.Edition.forNumber(edition_); + return result == null + ? com.google.spanner.admin.instance.v1.Instance.Edition.UNRECOGNIZED + : result; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -978,6 +1313,13 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(17, getAutoscalingConfig()); } + for (int i = 0; i < replicaComputeCapacity_.size(); i++) { + output.writeMessage(19, replicaComputeCapacity_.get(i)); + } + if (edition_ + != com.google.spanner.admin.instance.v1.Instance.Edition.EDITION_UNSPECIFIED.getNumber()) { + output.writeEnum(20, edition_); + } getUnknownFields().writeTo(output); } @@ -1033,6 +1375,15 @@ public int getSerializedSize() { if (((bitField0_ & 0x00000001) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(17, getAutoscalingConfig()); } + for (int i = 0; i < replicaComputeCapacity_.size(); i++) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 19, replicaComputeCapacity_.get(i)); + } + if (edition_ + != com.google.spanner.admin.instance.v1.Instance.Edition.EDITION_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(20, edition_); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -1054,6 +1405,8 @@ public boolean equals(final java.lang.Object obj) { if (!getDisplayName().equals(other.getDisplayName())) return false; if (getNodeCount() != other.getNodeCount()) return false; if (getProcessingUnits() != other.getProcessingUnits()) return false; + if 
(!getReplicaComputeCapacityList().equals(other.getReplicaComputeCapacityList())) + return false; if (hasAutoscalingConfig() != other.hasAutoscalingConfig()) return false; if (hasAutoscalingConfig()) { if (!getAutoscalingConfig().equals(other.getAutoscalingConfig())) return false; @@ -1069,6 +1422,7 @@ public boolean equals(final java.lang.Object obj) { if (hasUpdateTime()) { if (!getUpdateTime().equals(other.getUpdateTime())) return false; } + if (edition_ != other.edition_) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -1090,6 +1444,10 @@ public int hashCode() { hash = (53 * hash) + getNodeCount(); hash = (37 * hash) + PROCESSING_UNITS_FIELD_NUMBER; hash = (53 * hash) + getProcessingUnits(); + if (getReplicaComputeCapacityCount() > 0) { + hash = (37 * hash) + REPLICA_COMPUTE_CAPACITY_FIELD_NUMBER; + hash = (53 * hash) + getReplicaComputeCapacityList().hashCode(); + } if (hasAutoscalingConfig()) { hash = (37 * hash) + AUTOSCALING_CONFIG_FIELD_NUMBER; hash = (53 * hash) + getAutoscalingConfig().hashCode(); @@ -1112,6 +1470,8 @@ public int hashCode() { hash = (37 * hash) + UPDATE_TIME_FIELD_NUMBER; hash = (53 * hash) + getUpdateTime().hashCode(); } + hash = (37 * hash) + EDITION_FIELD_NUMBER; + hash = (53 * hash) + edition_; hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -1274,6 +1634,7 @@ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getReplicaComputeCapacityFieldBuilder(); getAutoscalingConfigFieldBuilder(); getCreateTimeFieldBuilder(); getUpdateTimeFieldBuilder(); @@ -1289,6 +1650,13 @@ public Builder clear() { displayName_ = ""; nodeCount_ = 0; processingUnits_ = 0; + if (replicaComputeCapacityBuilder_ == null) { + replicaComputeCapacity_ = java.util.Collections.emptyList(); + } else { + replicaComputeCapacity_ 
= null; + replicaComputeCapacityBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); autoscalingConfig_ = null; if (autoscalingConfigBuilder_ != null) { autoscalingConfigBuilder_.dispose(); @@ -1307,6 +1675,7 @@ public Builder clear() { updateTimeBuilder_.dispose(); updateTimeBuilder_ = null; } + edition_ = 0; return this; } @@ -1334,6 +1703,7 @@ public com.google.spanner.admin.instance.v1.Instance build() { public com.google.spanner.admin.instance.v1.Instance buildPartial() { com.google.spanner.admin.instance.v1.Instance result = new com.google.spanner.admin.instance.v1.Instance(this); + buildPartialRepeatedFields(result); if (bitField0_ != 0) { buildPartial0(result); } @@ -1341,6 +1711,18 @@ public com.google.spanner.admin.instance.v1.Instance buildPartial() { return result; } + private void buildPartialRepeatedFields(com.google.spanner.admin.instance.v1.Instance result) { + if (replicaComputeCapacityBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0)) { + replicaComputeCapacity_ = java.util.Collections.unmodifiableList(replicaComputeCapacity_); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.replicaComputeCapacity_ = replicaComputeCapacity_; + } else { + result.replicaComputeCapacity_ = replicaComputeCapacityBuilder_.build(); + } + } + private void buildPartial0(com.google.spanner.admin.instance.v1.Instance result) { int from_bitField0_ = bitField0_; if (((from_bitField0_ & 0x00000001) != 0)) { @@ -1359,32 +1741,35 @@ private void buildPartial0(com.google.spanner.admin.instance.v1.Instance result) result.processingUnits_ = processingUnits_; } int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000020) != 0)) { + if (((from_bitField0_ & 0x00000040) != 0)) { result.autoscalingConfig_ = autoscalingConfigBuilder_ == null ? 
autoscalingConfig_ : autoscalingConfigBuilder_.build(); to_bitField0_ |= 0x00000001; } - if (((from_bitField0_ & 0x00000040) != 0)) { + if (((from_bitField0_ & 0x00000080) != 0)) { result.state_ = state_; } - if (((from_bitField0_ & 0x00000080) != 0)) { + if (((from_bitField0_ & 0x00000100) != 0)) { result.labels_ = internalGetLabels(); result.labels_.makeImmutable(); } - if (((from_bitField0_ & 0x00000100) != 0)) { + if (((from_bitField0_ & 0x00000200) != 0)) { endpointUris_.makeImmutable(); result.endpointUris_ = endpointUris_; } - if (((from_bitField0_ & 0x00000200) != 0)) { + if (((from_bitField0_ & 0x00000400) != 0)) { result.createTime_ = createTimeBuilder_ == null ? createTime_ : createTimeBuilder_.build(); to_bitField0_ |= 0x00000002; } - if (((from_bitField0_ & 0x00000400) != 0)) { + if (((from_bitField0_ & 0x00000800) != 0)) { result.updateTime_ = updateTimeBuilder_ == null ? updateTime_ : updateTimeBuilder_.build(); to_bitField0_ |= 0x00000004; } + if (((from_bitField0_ & 0x00001000) != 0)) { + result.edition_ = edition_; + } result.bitField0_ |= to_bitField0_; } @@ -1454,6 +1839,33 @@ public Builder mergeFrom(com.google.spanner.admin.instance.v1.Instance other) { if (other.getProcessingUnits() != 0) { setProcessingUnits(other.getProcessingUnits()); } + if (replicaComputeCapacityBuilder_ == null) { + if (!other.replicaComputeCapacity_.isEmpty()) { + if (replicaComputeCapacity_.isEmpty()) { + replicaComputeCapacity_ = other.replicaComputeCapacity_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.addAll(other.replicaComputeCapacity_); + } + onChanged(); + } + } else { + if (!other.replicaComputeCapacity_.isEmpty()) { + if (replicaComputeCapacityBuilder_.isEmpty()) { + replicaComputeCapacityBuilder_.dispose(); + replicaComputeCapacityBuilder_ = null; + replicaComputeCapacity_ = other.replicaComputeCapacity_; + bitField0_ = (bitField0_ & ~0x00000020); + replicaComputeCapacityBuilder_ 
= + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getReplicaComputeCapacityFieldBuilder() + : null; + } else { + replicaComputeCapacityBuilder_.addAllMessages(other.replicaComputeCapacity_); + } + } + } if (other.hasAutoscalingConfig()) { mergeAutoscalingConfig(other.getAutoscalingConfig()); } @@ -1461,11 +1873,11 @@ public Builder mergeFrom(com.google.spanner.admin.instance.v1.Instance other) { setStateValue(other.getStateValue()); } internalGetMutableLabels().mergeFrom(other.internalGetLabels()); - bitField0_ |= 0x00000080; + bitField0_ |= 0x00000100; if (!other.endpointUris_.isEmpty()) { if (endpointUris_.isEmpty()) { endpointUris_ = other.endpointUris_; - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000200; } else { ensureEndpointUrisIsMutable(); endpointUris_.addAll(other.endpointUris_); @@ -1478,6 +1890,9 @@ public Builder mergeFrom(com.google.spanner.admin.instance.v1.Instance other) { if (other.hasUpdateTime()) { mergeUpdateTime(other.getUpdateTime()); } + if (other.edition_ != 0) { + setEditionValue(other.getEditionValue()); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -1531,7 +1946,7 @@ public Builder mergeFrom( case 48: { state_ = input.readEnum(); - bitField0_ |= 0x00000040; + bitField0_ |= 0x00000080; break; } // case 48 case 58: @@ -1543,7 +1958,7 @@ public Builder mergeFrom( internalGetMutableLabels() .getMutableMap() .put(labels__.getKey(), labels__.getValue()); - bitField0_ |= 0x00000080; + bitField0_ |= 0x00000100; break; } // case 58 case 66: @@ -1562,22 +1977,42 @@ public Builder mergeFrom( case 90: { input.readMessage(getCreateTimeFieldBuilder().getBuilder(), extensionRegistry); - bitField0_ |= 0x00000200; + bitField0_ |= 0x00000400; break; } // case 90 case 98: { input.readMessage(getUpdateTimeFieldBuilder().getBuilder(), extensionRegistry); - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000800; break; } // case 98 case 138: { input.readMessage( 
getAutoscalingConfigFieldBuilder().getBuilder(), extensionRegistry); - bitField0_ |= 0x00000020; + bitField0_ |= 0x00000040; break; } // case 138 + case 154: + { + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity m = + input.readMessage( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.parser(), + extensionRegistry); + if (replicaComputeCapacityBuilder_ == null) { + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.add(m); + } else { + replicaComputeCapacityBuilder_.addMessage(m); + } + break; + } // case 154 + case 160: + { + edition_ = input.readEnum(); + bitField0_ |= 0x00001000; + break; + } // case 160 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -1940,196 +2375,687 @@ public Builder clearDisplayName() { * * *
    -     * Required. The descriptive name for this instance as it appears in UIs.
    -     * Must be unique per project and between 4 and 30 characters in length.
    +     * Required. The descriptive name for this instance as it appears in UIs.
    +     * Must be unique per project and between 4 and 30 characters in length.
    +     * 
    + * + * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for displayName to set. + * @return This builder for chaining. + */ + public Builder setDisplayNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + displayName_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private int nodeCount_; + /** + * + * + *
    +     * The number of nodes allocated to this instance. At most, one of either
    +     * `node_count` or `processing_units` should be present in the message.
    +     *
    +     * Users can set the `node_count` field to specify the target number of nodes
    +     * allocated to the instance.
    +     *
    +     * If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY`
    +     * field and reflects the current number of nodes allocated to the instance.
    +     *
    +     * This might be zero in API responses for instances that are not yet in the
    +     * `READY` state.
    +     *
    +     * If the instance has varying node count across replicas (achieved by
    +     * setting asymmetric_autoscaling_options in autoscaling config), the
    +     * node_count here is the maximum node count across all replicas.
    +     *
    +     * For more information, see
    +     * [Compute capacity, nodes, and processing
    +     * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +     * 
    + * + * int32 node_count = 5; + * + * @return The nodeCount. + */ + @java.lang.Override + public int getNodeCount() { + return nodeCount_; + } + /** + * + * + *
    +     * The number of nodes allocated to this instance. At most, one of either
    +     * `node_count` or `processing_units` should be present in the message.
    +     *
    +     * Users can set the `node_count` field to specify the target number of nodes
    +     * allocated to the instance.
    +     *
    +     * If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY`
    +     * field and reflects the current number of nodes allocated to the instance.
    +     *
    +     * This might be zero in API responses for instances that are not yet in the
    +     * `READY` state.
    +     *
    +     * If the instance has varying node count across replicas (achieved by
    +     * setting asymmetric_autoscaling_options in autoscaling config), the
    +     * node_count here is the maximum node count across all replicas.
    +     *
    +     * For more information, see
    +     * [Compute capacity, nodes, and processing
    +     * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +     * 
    + * + * int32 node_count = 5; + * + * @param value The nodeCount to set. + * @return This builder for chaining. + */ + public Builder setNodeCount(int value) { + + nodeCount_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * + * + *
    +     * The number of nodes allocated to this instance. At most, one of either
    +     * `node_count` or `processing_units` should be present in the message.
    +     *
    +     * Users can set the `node_count` field to specify the target number of nodes
    +     * allocated to the instance.
    +     *
    +     * If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY`
    +     * field and reflects the current number of nodes allocated to the instance.
    +     *
    +     * This might be zero in API responses for instances that are not yet in the
    +     * `READY` state.
    +     *
    +     * If the instance has varying node count across replicas (achieved by
    +     * setting asymmetric_autoscaling_options in autoscaling config), the
    +     * node_count here is the maximum node count across all replicas.
    +     *
    +     * For more information, see
    +     * [Compute capacity, nodes, and processing
    +     * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +     * 
    + * + * int32 node_count = 5; + * + * @return This builder for chaining. + */ + public Builder clearNodeCount() { + bitField0_ = (bitField0_ & ~0x00000008); + nodeCount_ = 0; + onChanged(); + return this; + } + + private int processingUnits_; + /** + * + * + *
    +     * The number of processing units allocated to this instance. At most, one of
    +     * either `processing_units` or `node_count` should be present in the message.
    +     *
    +     * Users can set the `processing_units` field to specify the target number of
    +     * processing units allocated to the instance.
    +     *
    +     * If autoscaling is enabled, `processing_units` is treated as an
    +     * `OUTPUT_ONLY` field and reflects the current number of processing units
    +     * allocated to the instance.
    +     *
    +     * This might be zero in API responses for instances that are not yet in the
    +     * `READY` state.
    +     *
    +     * If the instance has varying processing units per replica
    +     * (achieved by setting asymmetric_autoscaling_options in autoscaling config),
    +     * the processing_units here is the maximum processing units across all
    +     * replicas.
    +     *
    +     * For more information, see
    +     * [Compute capacity, nodes and processing
    +     * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +     * 
    + * + * int32 processing_units = 9; + * + * @return The processingUnits. + */ + @java.lang.Override + public int getProcessingUnits() { + return processingUnits_; + } + /** + * + * + *
    +     * The number of processing units allocated to this instance. At most, one of
    +     * either `processing_units` or `node_count` should be present in the message.
    +     *
    +     * Users can set the `processing_units` field to specify the target number of
    +     * processing units allocated to the instance.
    +     *
    +     * If autoscaling is enabled, `processing_units` is treated as an
    +     * `OUTPUT_ONLY` field and reflects the current number of processing units
    +     * allocated to the instance.
    +     *
    +     * This might be zero in API responses for instances that are not yet in the
    +     * `READY` state.
    +     *
    +     * If the instance has varying processing units per replica
    +     * (achieved by setting asymmetric_autoscaling_options in autoscaling config),
    +     * the processing_units here is the maximum processing units across all
    +     * replicas.
    +     *
    +     * For more information, see
    +     * [Compute capacity, nodes and processing
    +     * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +     * 
    + * + * int32 processing_units = 9; + * + * @param value The processingUnits to set. + * @return This builder for chaining. + */ + public Builder setProcessingUnits(int value) { + + processingUnits_ = value; + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + /** + * + * + *
    +     * The number of processing units allocated to this instance. At most, one of
    +     * either `processing_units` or `node_count` should be present in the message.
    +     *
    +     * Users can set the `processing_units` field to specify the target number of
    +     * processing units allocated to the instance.
    +     *
    +     * If autoscaling is enabled, `processing_units` is treated as an
    +     * `OUTPUT_ONLY` field and reflects the current number of processing units
    +     * allocated to the instance.
    +     *
    +     * This might be zero in API responses for instances that are not yet in the
    +     * `READY` state.
    +     *
    +     * If the instance has varying processing units per replica
    +     * (achieved by setting asymmetric_autoscaling_options in autoscaling config),
    +     * the processing_units here is the maximum processing units across all
    +     * replicas.
    +     *
    +     * For more information, see
    +     * [Compute capacity, nodes and processing
    +     * units](https://cloud.google.com/spanner/docs/compute-capacity).
    +     * 
    + * + * int32 processing_units = 9; + * + * @return This builder for chaining. + */ + public Builder clearProcessingUnits() { + bitField0_ = (bitField0_ & ~0x00000010); + processingUnits_ = 0; + onChanged(); + return this; + } + + private java.util.List + replicaComputeCapacity_ = java.util.Collections.emptyList(); + + private void ensureReplicaComputeCapacityIsMutable() { + if (!((bitField0_ & 0x00000020) != 0)) { + replicaComputeCapacity_ = + new java.util.ArrayList( + replicaComputeCapacity_); + bitField0_ |= 0x00000020; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder> + replicaComputeCapacityBuilder_; + + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getReplicaComputeCapacityList() { + if (replicaComputeCapacityBuilder_ == null) { + return java.util.Collections.unmodifiableList(replicaComputeCapacity_); + } else { + return replicaComputeCapacityBuilder_.getMessageList(); + } + } + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getReplicaComputeCapacityCount() { + if (replicaComputeCapacityBuilder_ == null) { + return replicaComputeCapacity_.size(); + } else { + return replicaComputeCapacityBuilder_.getCount(); + } + } + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity getReplicaComputeCapacity( + int index) { + if (replicaComputeCapacityBuilder_ == null) { + return replicaComputeCapacity_.get(index); + } else { + return replicaComputeCapacityBuilder_.getMessage(index); + } + } + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setReplicaComputeCapacity( + int index, com.google.spanner.admin.instance.v1.ReplicaComputeCapacity value) { + if (replicaComputeCapacityBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.set(index, value); + onChanged(); + } else { + replicaComputeCapacityBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setReplicaComputeCapacity( + int index, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder builderForValue) { + if (replicaComputeCapacityBuilder_ == null) { + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.set(index, builderForValue.build()); + onChanged(); + } else { + replicaComputeCapacityBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addReplicaComputeCapacity( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity value) { + if (replicaComputeCapacityBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.add(value); + onChanged(); + } else { + replicaComputeCapacityBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addReplicaComputeCapacity( + int index, com.google.spanner.admin.instance.v1.ReplicaComputeCapacity value) { + if (replicaComputeCapacityBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.add(index, value); + onChanged(); + } else { + replicaComputeCapacityBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addReplicaComputeCapacity( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder builderForValue) { + if (replicaComputeCapacityBuilder_ == null) { + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.add(builderForValue.build()); + onChanged(); + } else { + replicaComputeCapacityBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
          * 
    * - * string display_name = 3 [(.google.api.field_behavior) = REQUIRED]; - * - * @param value The bytes for displayName to set. - * @return This builder for chaining. + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * */ - public Builder setDisplayNameBytes(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); + public Builder addReplicaComputeCapacity( + int index, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder builderForValue) { + if (replicaComputeCapacityBuilder_ == null) { + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.add(index, builderForValue.build()); + onChanged(); + } else { + replicaComputeCapacityBuilder_.addMessage(index, builderForValue.build()); } - checkByteStringIsUtf8(value); - displayName_ = value; - bitField0_ |= 0x00000004; - onChanged(); return this; } - - private int nodeCount_; /** * * *
    -     * The number of nodes allocated to this instance. At most one of either
    -     * node_count or processing_units should be present in the message.
    -     *
    -     * Users can set the node_count field to specify the target number of nodes
    -     * allocated to the instance.
    -     *
    -     * This may be zero in API responses for instances that are not yet in state
    -     * `READY`.
    -     *
    -     * See [the
    -     * documentation](https://cloud.google.com/spanner/docs/compute-capacity)
    -     * for more information about nodes and processing units.
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
          * 
    * - * int32 node_count = 5; - * - * @return The nodeCount. + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * */ - @java.lang.Override - public int getNodeCount() { - return nodeCount_; + public Builder addAllReplicaComputeCapacity( + java.lang.Iterable + values) { + if (replicaComputeCapacityBuilder_ == null) { + ensureReplicaComputeCapacityIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, replicaComputeCapacity_); + onChanged(); + } else { + replicaComputeCapacityBuilder_.addAllMessages(values); + } + return this; } /** * * *
    -     * The number of nodes allocated to this instance. At most one of either
    -     * node_count or processing_units should be present in the message.
    -     *
    -     * Users can set the node_count field to specify the target number of nodes
    -     * allocated to the instance.
    -     *
    -     * This may be zero in API responses for instances that are not yet in state
    -     * `READY`.
    -     *
    -     * See [the
    -     * documentation](https://cloud.google.com/spanner/docs/compute-capacity)
    -     * for more information about nodes and processing units.
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
          * 
    * - * int32 node_count = 5; - * - * @param value The nodeCount to set. - * @return This builder for chaining. + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * */ - public Builder setNodeCount(int value) { - - nodeCount_ = value; - bitField0_ |= 0x00000008; - onChanged(); + public Builder clearReplicaComputeCapacity() { + if (replicaComputeCapacityBuilder_ == null) { + replicaComputeCapacity_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + } else { + replicaComputeCapacityBuilder_.clear(); + } return this; } /** * * *
    -     * The number of nodes allocated to this instance. At most one of either
    -     * node_count or processing_units should be present in the message.
    -     *
    -     * Users can set the node_count field to specify the target number of nodes
    -     * allocated to the instance.
    -     *
    -     * This may be zero in API responses for instances that are not yet in state
    -     * `READY`.
    -     *
    -     * See [the
    -     * documentation](https://cloud.google.com/spanner/docs/compute-capacity)
    -     * for more information about nodes and processing units.
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
          * 
    * - * int32 node_count = 5; - * - * @return This builder for chaining. + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * */ - public Builder clearNodeCount() { - bitField0_ = (bitField0_ & ~0x00000008); - nodeCount_ = 0; - onChanged(); + public Builder removeReplicaComputeCapacity(int index) { + if (replicaComputeCapacityBuilder_ == null) { + ensureReplicaComputeCapacityIsMutable(); + replicaComputeCapacity_.remove(index); + onChanged(); + } else { + replicaComputeCapacityBuilder_.remove(index); + } return this; } - - private int processingUnits_; /** * * *
    -     * The number of processing units allocated to this instance. At most one of
    -     * processing_units or node_count should be present in the message.
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    * - * Users can set the processing_units field to specify the target number of - * processing units allocated to the instance. + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder + getReplicaComputeCapacityBuilder(int index) { + return getReplicaComputeCapacityFieldBuilder().getBuilder(index); + } + /** * - * This may be zero in API responses for instances that are not yet in state - * `READY`. * - * See [the - * documentation](https://cloud.google.com/spanner/docs/compute-capacity) - * for more information about nodes and processing units. + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
          * 
    * - * int32 processing_units = 9; - * - * @return The processingUnits. + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * */ - @java.lang.Override - public int getProcessingUnits() { - return processingUnits_; + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder + getReplicaComputeCapacityOrBuilder(int index) { + if (replicaComputeCapacityBuilder_ == null) { + return replicaComputeCapacity_.get(index); + } else { + return replicaComputeCapacityBuilder_.getMessageOrBuilder(index); + } } /** * * *
    -     * The number of processing units allocated to this instance. At most one of
    -     * processing_units or node_count should be present in the message.
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    * - * Users can set the processing_units field to specify the target number of - * processing units allocated to the instance. + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List< + ? extends com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder> + getReplicaComputeCapacityOrBuilderList() { + if (replicaComputeCapacityBuilder_ != null) { + return replicaComputeCapacityBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(replicaComputeCapacity_); + } + } + /** * - * This may be zero in API responses for instances that are not yet in state - * `READY`. * - * See [the - * documentation](https://cloud.google.com/spanner/docs/compute-capacity) - * for more information about nodes and processing units. + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
          * 
    * - * int32 processing_units = 9; - * - * @param value The processingUnits to set. - * @return This builder for chaining. + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * */ - public Builder setProcessingUnits(int value) { - - processingUnits_ = value; - bitField0_ |= 0x00000010; - onChanged(); - return this; + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder + addReplicaComputeCapacityBuilder() { + return getReplicaComputeCapacityFieldBuilder() + .addBuilder( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.getDefaultInstance()); } /** * * *
    -     * The number of processing units allocated to this instance. At most one of
    -     * processing_units or node_count should be present in the message.
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
    +     * 
    * - * Users can set the processing_units field to specify the target number of - * processing units allocated to the instance. + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder + addReplicaComputeCapacityBuilder(int index) { + return getReplicaComputeCapacityFieldBuilder() + .addBuilder( + index, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.getDefaultInstance()); + } + /** * - * This may be zero in API responses for instances that are not yet in state - * `READY`. * - * See [the - * documentation](https://cloud.google.com/spanner/docs/compute-capacity) - * for more information about nodes and processing units. + *
    +     * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +     * selection identifies a set of replicas with common properties. Replicas
    +     * identified by a ReplicaSelection are scaled with the same compute capacity.
          * 
    * - * int32 processing_units = 9; - * - * @return This builder for chaining. + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * */ - public Builder clearProcessingUnits() { - bitField0_ = (bitField0_ & ~0x00000010); - processingUnits_ = 0; - onChanged(); - return this; + public java.util.List + getReplicaComputeCapacityBuilderList() { + return getReplicaComputeCapacityFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder> + getReplicaComputeCapacityFieldBuilder() { + if (replicaComputeCapacityBuilder_ == null) { + replicaComputeCapacityBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder>( + replicaComputeCapacity_, + ((bitField0_ & 0x00000020) != 0), + getParentForChildren(), + isClean()); + replicaComputeCapacity_ = null; + } + return replicaComputeCapacityBuilder_; } private com.google.spanner.admin.instance.v1.AutoscalingConfig autoscalingConfig_; @@ -2155,7 +3081,7 @@ public Builder clearProcessingUnits() { * @return Whether the autoscalingConfig field is set. 
*/ public boolean hasAutoscalingConfig() { - return ((bitField0_ & 0x00000020) != 0); + return ((bitField0_ & 0x00000040) != 0); } /** * @@ -2206,7 +3132,7 @@ public Builder setAutoscalingConfig( } else { autoscalingConfigBuilder_.setMessage(value); } - bitField0_ |= 0x00000020; + bitField0_ |= 0x00000040; onChanged(); return this; } @@ -2231,7 +3157,7 @@ public Builder setAutoscalingConfig( } else { autoscalingConfigBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000020; + bitField0_ |= 0x00000040; onChanged(); return this; } @@ -2252,7 +3178,7 @@ public Builder setAutoscalingConfig( public Builder mergeAutoscalingConfig( com.google.spanner.admin.instance.v1.AutoscalingConfig value) { if (autoscalingConfigBuilder_ == null) { - if (((bitField0_ & 0x00000020) != 0) + if (((bitField0_ & 0x00000040) != 0) && autoscalingConfig_ != null && autoscalingConfig_ != com.google.spanner.admin.instance.v1.AutoscalingConfig.getDefaultInstance()) { @@ -2264,7 +3190,7 @@ public Builder mergeAutoscalingConfig( autoscalingConfigBuilder_.mergeFrom(value); } if (autoscalingConfig_ != null) { - bitField0_ |= 0x00000020; + bitField0_ |= 0x00000040; onChanged(); } return this; @@ -2284,7 +3210,7 @@ public Builder mergeAutoscalingConfig( *
    */ public Builder clearAutoscalingConfig() { - bitField0_ = (bitField0_ & ~0x00000020); + bitField0_ = (bitField0_ & ~0x00000040); autoscalingConfig_ = null; if (autoscalingConfigBuilder_ != null) { autoscalingConfigBuilder_.dispose(); @@ -2309,7 +3235,7 @@ public Builder clearAutoscalingConfig() { */ public com.google.spanner.admin.instance.v1.AutoscalingConfig.Builder getAutoscalingConfigBuilder() { - bitField0_ |= 0x00000020; + bitField0_ |= 0x00000040; onChanged(); return getAutoscalingConfigFieldBuilder().getBuilder(); } @@ -2410,7 +3336,7 @@ public int getStateValue() { */ public Builder setStateValue(int value) { state_ = value; - bitField0_ |= 0x00000040; + bitField0_ |= 0x00000080; onChanged(); return this; } @@ -2461,7 +3387,7 @@ public Builder setState(com.google.spanner.admin.instance.v1.Instance.State valu if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000040; + bitField0_ |= 0x00000080; state_ = value.getNumber(); onChanged(); return this; @@ -2484,7 +3410,7 @@ public Builder setState(com.google.spanner.admin.instance.v1.Instance.State valu * @return This builder for chaining. */ public Builder clearState() { - bitField0_ = (bitField0_ & ~0x00000040); + bitField0_ = (bitField0_ & ~0x00000080); state_ = 0; onChanged(); return this; @@ -2507,7 +3433,7 @@ private com.google.protobuf.MapField interna if (!labels_.isMutable()) { labels_ = labels_.copy(); } - bitField0_ |= 0x00000080; + bitField0_ |= 0x00000100; onChanged(); return labels_; } @@ -2672,7 +3598,7 @@ public java.lang.String getLabelsOrThrow(java.lang.String key) { } public Builder clearLabels() { - bitField0_ = (bitField0_ & ~0x00000080); + bitField0_ = (bitField0_ & ~0x00000100); internalGetMutableLabels().getMutableMap().clear(); return this; } @@ -2715,7 +3641,7 @@ public Builder removeLabels(java.lang.String key) { /** Use alternate mutation accessors instead. 
*/ @java.lang.Deprecated public java.util.Map getMutableLabels() { - bitField0_ |= 0x00000080; + bitField0_ |= 0x00000100; return internalGetMutableLabels().getMutableMap(); } /** @@ -2755,7 +3681,7 @@ public Builder putLabels(java.lang.String key, java.lang.String value) { throw new NullPointerException("map value"); } internalGetMutableLabels().getMutableMap().put(key, value); - bitField0_ |= 0x00000080; + bitField0_ |= 0x00000100; return this; } /** @@ -2789,7 +3715,7 @@ public Builder putLabels(java.lang.String key, java.lang.String value) { */ public Builder putAllLabels(java.util.Map values) { internalGetMutableLabels().getMutableMap().putAll(values); - bitField0_ |= 0x00000080; + bitField0_ |= 0x00000100; return this; } @@ -2800,7 +3726,7 @@ private void ensureEndpointUrisIsMutable() { if (!endpointUris_.isModifiable()) { endpointUris_ = new com.google.protobuf.LazyStringArrayList(endpointUris_); } - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000200; } /** * @@ -2880,7 +3806,7 @@ public Builder setEndpointUris(int index, java.lang.String value) { } ensureEndpointUrisIsMutable(); endpointUris_.set(index, value); - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000200; onChanged(); return this; } @@ -2902,7 +3828,7 @@ public Builder addEndpointUris(java.lang.String value) { } ensureEndpointUrisIsMutable(); endpointUris_.add(value); - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000200; onChanged(); return this; } @@ -2921,7 +3847,7 @@ public Builder addEndpointUris(java.lang.String value) { public Builder addAllEndpointUris(java.lang.Iterable values) { ensureEndpointUrisIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll(values, endpointUris_); - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000200; onChanged(); return this; } @@ -2938,7 +3864,7 @@ public Builder addAllEndpointUris(java.lang.Iterable values) { */ public Builder clearEndpointUris() { endpointUris_ = com.google.protobuf.LazyStringArrayList.emptyList(); - bitField0_ = 
(bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000200); ; onChanged(); return this; @@ -2962,7 +3888,7 @@ public Builder addEndpointUrisBytes(com.google.protobuf.ByteString value) { checkByteStringIsUtf8(value); ensureEndpointUrisIsMutable(); endpointUris_.add(value); - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000200; onChanged(); return this; } @@ -2987,7 +3913,7 @@ public Builder addEndpointUrisBytes(com.google.protobuf.ByteString value) { * @return Whether the createTime field is set. */ public boolean hasCreateTime() { - return ((bitField0_ & 0x00000200) != 0); + return ((bitField0_ & 0x00000400) != 0); } /** * @@ -3031,7 +3957,7 @@ public Builder setCreateTime(com.google.protobuf.Timestamp value) { } else { createTimeBuilder_.setMessage(value); } - bitField0_ |= 0x00000200; + bitField0_ |= 0x00000400; onChanged(); return this; } @@ -3052,7 +3978,7 @@ public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForVal } else { createTimeBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000200; + bitField0_ |= 0x00000400; onChanged(); return this; } @@ -3069,7 +3995,7 @@ public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForVal */ public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { if (createTimeBuilder_ == null) { - if (((bitField0_ & 0x00000200) != 0) + if (((bitField0_ & 0x00000400) != 0) && createTime_ != null && createTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { getCreateTimeBuilder().mergeFrom(value); @@ -3080,7 +4006,7 @@ public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { createTimeBuilder_.mergeFrom(value); } if (createTime_ != null) { - bitField0_ |= 0x00000200; + bitField0_ |= 0x00000400; onChanged(); } return this; @@ -3097,7 +4023,7 @@ public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { *
    */ public Builder clearCreateTime() { - bitField0_ = (bitField0_ & ~0x00000200); + bitField0_ = (bitField0_ & ~0x00000400); createTime_ = null; if (createTimeBuilder_ != null) { createTimeBuilder_.dispose(); @@ -3118,7 +4044,7 @@ public Builder clearCreateTime() { *
    */ public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { - bitField0_ |= 0x00000200; + bitField0_ |= 0x00000400; onChanged(); return getCreateTimeFieldBuilder().getBuilder(); } @@ -3190,7 +4116,7 @@ public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { * @return Whether the updateTime field is set. */ public boolean hasUpdateTime() { - return ((bitField0_ & 0x00000400) != 0); + return ((bitField0_ & 0x00000800) != 0); } /** * @@ -3234,7 +4160,7 @@ public Builder setUpdateTime(com.google.protobuf.Timestamp value) { } else { updateTimeBuilder_.setMessage(value); } - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000800; onChanged(); return this; } @@ -3255,7 +4181,7 @@ public Builder setUpdateTime(com.google.protobuf.Timestamp.Builder builderForVal } else { updateTimeBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000800; onChanged(); return this; } @@ -3272,7 +4198,7 @@ public Builder setUpdateTime(com.google.protobuf.Timestamp.Builder builderForVal */ public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { if (updateTimeBuilder_ == null) { - if (((bitField0_ & 0x00000400) != 0) + if (((bitField0_ & 0x00000800) != 0) && updateTime_ != null && updateTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { getUpdateTimeBuilder().mergeFrom(value); @@ -3283,7 +4209,7 @@ public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { updateTimeBuilder_.mergeFrom(value); } if (updateTime_ != null) { - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000800; onChanged(); } return this; @@ -3300,7 +4226,7 @@ public Builder mergeUpdateTime(com.google.protobuf.Timestamp value) { *
    */ public Builder clearUpdateTime() { - bitField0_ = (bitField0_ & ~0x00000400); + bitField0_ = (bitField0_ & ~0x00000800); updateTime_ = null; if (updateTimeBuilder_ != null) { updateTimeBuilder_.dispose(); @@ -3321,7 +4247,7 @@ public Builder clearUpdateTime() { *
    */ public com.google.protobuf.Timestamp.Builder getUpdateTimeBuilder() { - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000800; onChanged(); return getUpdateTimeFieldBuilder().getBuilder(); } @@ -3373,6 +4299,108 @@ public com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder() { return updateTimeBuilder_; } + private int edition_ = 0; + /** + * + * + *
    +     * Optional. The `Edition` of the current instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for edition. + */ + @java.lang.Override + public int getEditionValue() { + return edition_; + } + /** + * + * + *
    +     * Optional. The `Edition` of the current instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for edition to set. + * @return This builder for chaining. + */ + public Builder setEditionValue(int value) { + edition_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. The `Edition` of the current instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The edition. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.Instance.Edition getEdition() { + com.google.spanner.admin.instance.v1.Instance.Edition result = + com.google.spanner.admin.instance.v1.Instance.Edition.forNumber(edition_); + return result == null + ? com.google.spanner.admin.instance.v1.Instance.Edition.UNRECOGNIZED + : result; + } + /** + * + * + *
    +     * Optional. The `Edition` of the current instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The edition to set. + * @return This builder for chaining. + */ + public Builder setEdition(com.google.spanner.admin.instance.v1.Instance.Edition value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00001000; + edition_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. The `Edition` of the current instance.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearEdition() { + bitField0_ = (bitField0_ & ~0x00001000); + edition_ = 0; + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfig.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfig.java index 10ad9945d8a..fe9c5c59c31 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfig.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfig.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** @@ -247,7 +247,7 @@ private Type(int value) { * * *
    -   * Indicates the current state of the instance config.
    +   * Indicates the current state of the instance configuration.
        * 
    * * Protobuf enum {@code google.spanner.admin.instance.v1.InstanceConfig.State} @@ -267,7 +267,7 @@ public enum State implements com.google.protobuf.ProtocolMessageEnum { * * *
    -     * The instance config is still being created.
    +     * The instance configuration is still being created.
          * 
    * * CREATING = 1; @@ -277,8 +277,8 @@ public enum State implements com.google.protobuf.ProtocolMessageEnum { * * *
    -     * The instance config is fully created and ready to be used to create
    -     * instances.
    +     * The instance configuration is fully created and ready to be used to
    +     * create instances.
          * 
    * * READY = 2; @@ -301,7 +301,7 @@ public enum State implements com.google.protobuf.ProtocolMessageEnum { * * *
    -     * The instance config is still being created.
    +     * The instance configuration is still being created.
          * 
    * * CREATING = 1; @@ -311,8 +311,8 @@ public enum State implements com.google.protobuf.ProtocolMessageEnum { * * *
    -     * The instance config is fully created and ready to be used to create
    -     * instances.
    +     * The instance configuration is fully created and ready to be used to
    +     * create instances.
          * 
    * * READY = 2; @@ -415,6 +415,8 @@ private State(int value) { * A unique identifier for the instance configuration. Values * are of the form * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`. + * + * User instance configuration must start with `custom-`. *
    * * string name = 1; @@ -440,6 +442,8 @@ public java.lang.String getName() { * A unique identifier for the instance configuration. Values * are of the form * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`. + * + * User instance configuration must start with `custom-`. *
    * * string name = 1; @@ -516,8 +520,8 @@ public com.google.protobuf.ByteString getDisplayNameBytes() { * * *
    -   * Output only. Whether this instance config is a Google or User Managed
    -   * Configuration.
    +   * Output only. Whether this instance configuration is a Google-managed or
    +   * user-managed configuration.
        * 
    * * @@ -534,8 +538,8 @@ public int getConfigTypeValue() { * * *
    -   * Output only. Whether this instance config is a Google or User Managed
    -   * Configuration.
    +   * Output only. Whether this instance configuration is a Google-managed or
    +   * user-managed configuration.
        * 
    * * @@ -965,15 +969,16 @@ public java.lang.String getLabelsOrThrow(java.lang.String key) { * *
        * etag is used for optimistic concurrency control as a way
    -   * to help prevent simultaneous updates of a instance config from overwriting
    -   * each other. It is strongly suggested that systems make use of the etag in
    -   * the read-modify-write cycle to perform instance config updates in order to
    -   * avoid race conditions: An etag is returned in the response which contains
    -   * instance configs, and systems are expected to put that etag in the request
    -   * to update instance config to ensure that their change will be applied to
    -   * the same version of the instance config.
    -   * If no etag is provided in the call to update instance config, then the
    -   * existing instance config is overwritten blindly.
    +   * to help prevent simultaneous updates of a instance configuration from
    +   * overwriting each other. It is strongly suggested that systems make use of
    +   * the etag in the read-modify-write cycle to perform instance configuration
    +   * updates in order to avoid race conditions: An etag is returned in the
    +   * response which contains instance configurations, and systems are expected
    +   * to put that etag in the request to update instance configuration to ensure
    +   * that their change is applied to the same version of the instance
    +   * configuration. If no etag is provided in the call to update the instance
    +   * configuration, then the existing instance configuration is overwritten
    +   * blindly.
        * 
    * * string etag = 9; @@ -997,15 +1002,16 @@ public java.lang.String getEtag() { * *
        * etag is used for optimistic concurrency control as a way
    -   * to help prevent simultaneous updates of a instance config from overwriting
    -   * each other. It is strongly suggested that systems make use of the etag in
    -   * the read-modify-write cycle to perform instance config updates in order to
    -   * avoid race conditions: An etag is returned in the response which contains
    -   * instance configs, and systems are expected to put that etag in the request
    -   * to update instance config to ensure that their change will be applied to
    -   * the same version of the instance config.
    -   * If no etag is provided in the call to update instance config, then the
    -   * existing instance config is overwritten blindly.
    +   * to help prevent simultaneous updates of a instance configuration from
    +   * overwriting each other. It is strongly suggested that systems make use of
    +   * the etag in the read-modify-write cycle to perform instance configuration
    +   * updates in order to avoid race conditions: An etag is returned in the
    +   * response which contains instance configurations, and systems are expected
    +   * to put that etag in the request to update instance configuration to ensure
    +   * that their change is applied to the same version of the instance
    +   * configuration. If no etag is provided in the call to update the instance
    +   * configuration, then the existing instance configuration is overwritten
    +   * blindly.
        * 
    * * string etag = 9; @@ -1099,8 +1105,9 @@ public com.google.protobuf.ByteString getLeaderOptionsBytes(int index) { * * *
    -   * Output only. If true, the instance config is being created or updated. If
    -   * false, there are no ongoing operations for the instance config.
    +   * Output only. If true, the instance configuration is being created or
    +   * updated. If false, there are no ongoing operations for the instance
    +   * configuration.
        * 
    * * bool reconciling = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1118,7 +1125,8 @@ public boolean getReconciling() { * * *
    -   * Output only. The current instance config state.
    +   * Output only. The current instance configuration state. Applicable only for
    +   * `USER_MANAGED` configurations.
        * 
    * * @@ -1135,7 +1143,8 @@ public int getStateValue() { * * *
    -   * Output only. The current instance config state.
    +   * Output only. The current instance configuration state. Applicable only for
    +   * `USER_MANAGED` configurations.
        * 
    * * @@ -1885,6 +1894,8 @@ public Builder mergeFrom( * A unique identifier for the instance configuration. Values * are of the form * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`. + * + * User instance configuration must start with `custom-`. *
    * * string name = 1; @@ -1909,6 +1920,8 @@ public java.lang.String getName() { * A unique identifier for the instance configuration. Values * are of the form * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`. + * + * User instance configuration must start with `custom-`. * * * string name = 1; @@ -1933,6 +1946,8 @@ public com.google.protobuf.ByteString getNameBytes() { * A unique identifier for the instance configuration. Values * are of the form * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`. + * + * User instance configuration must start with `custom-`. * * * string name = 1; @@ -1956,6 +1971,8 @@ public Builder setName(java.lang.String value) { * A unique identifier for the instance configuration. Values * are of the form * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`. + * + * User instance configuration must start with `custom-`. * * * string name = 1; @@ -1975,6 +1992,8 @@ public Builder clearName() { * A unique identifier for the instance configuration. Values * are of the form * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`. + * + * User instance configuration must start with `custom-`. * * * string name = 1; @@ -2104,8 +2123,8 @@ public Builder setDisplayNameBytes(com.google.protobuf.ByteString value) { * * *
    -     * Output only. Whether this instance config is a Google or User Managed
    -     * Configuration.
    +     * Output only. Whether this instance configuration is a Google-managed or
    +     * user-managed configuration.
          * 
    * * @@ -2122,8 +2141,8 @@ public int getConfigTypeValue() { * * *
    -     * Output only. Whether this instance config is a Google or User Managed
    -     * Configuration.
    +     * Output only. Whether this instance configuration is a Google-managed or
    +     * user-managed configuration.
          * 
    * * @@ -2143,8 +2162,8 @@ public Builder setConfigTypeValue(int value) { * * *
    -     * Output only. Whether this instance config is a Google or User Managed
    -     * Configuration.
    +     * Output only. Whether this instance configuration is a Google-managed or
    +     * user-managed configuration.
          * 
    * * @@ -2165,8 +2184,8 @@ public com.google.spanner.admin.instance.v1.InstanceConfig.Type getConfigType() * * *
    -     * Output only. Whether this instance config is a Google or User Managed
    -     * Configuration.
    +     * Output only. Whether this instance configuration is a Google-managed or
    +     * user-managed configuration.
          * 
    * * @@ -2189,8 +2208,8 @@ public Builder setConfigType(com.google.spanner.admin.instance.v1.InstanceConfig * * *
    -     * Output only. Whether this instance config is a Google or User Managed
    -     * Configuration.
    +     * Output only. Whether this instance configuration is a Google-managed or
    +     * user-managed configuration.
          * 
    * * @@ -3421,15 +3440,16 @@ public Builder putAllLabels(java.util.Map va * *
          * etag is used for optimistic concurrency control as a way
    -     * to help prevent simultaneous updates of a instance config from overwriting
    -     * each other. It is strongly suggested that systems make use of the etag in
    -     * the read-modify-write cycle to perform instance config updates in order to
    -     * avoid race conditions: An etag is returned in the response which contains
    -     * instance configs, and systems are expected to put that etag in the request
    -     * to update instance config to ensure that their change will be applied to
    -     * the same version of the instance config.
    -     * If no etag is provided in the call to update instance config, then the
    -     * existing instance config is overwritten blindly.
    +     * to help prevent simultaneous updates of a instance configuration from
    +     * overwriting each other. It is strongly suggested that systems make use of
    +     * the etag in the read-modify-write cycle to perform instance configuration
    +     * updates in order to avoid race conditions: An etag is returned in the
    +     * response which contains instance configurations, and systems are expected
    +     * to put that etag in the request to update instance configuration to ensure
    +     * that their change is applied to the same version of the instance
    +     * configuration. If no etag is provided in the call to update the instance
    +     * configuration, then the existing instance configuration is overwritten
    +     * blindly.
          * 
    * * string etag = 9; @@ -3452,15 +3472,16 @@ public java.lang.String getEtag() { * *
          * etag is used for optimistic concurrency control as a way
    -     * to help prevent simultaneous updates of a instance config from overwriting
    -     * each other. It is strongly suggested that systems make use of the etag in
    -     * the read-modify-write cycle to perform instance config updates in order to
    -     * avoid race conditions: An etag is returned in the response which contains
    -     * instance configs, and systems are expected to put that etag in the request
    -     * to update instance config to ensure that their change will be applied to
    -     * the same version of the instance config.
    -     * If no etag is provided in the call to update instance config, then the
    -     * existing instance config is overwritten blindly.
    +     * to help prevent simultaneous updates of a instance configuration from
    +     * overwriting each other. It is strongly suggested that systems make use of
    +     * the etag in the read-modify-write cycle to perform instance configuration
    +     * updates in order to avoid race conditions: An etag is returned in the
    +     * response which contains instance configurations, and systems are expected
    +     * to put that etag in the request to update instance configuration to ensure
    +     * that their change is applied to the same version of the instance
    +     * configuration. If no etag is provided in the call to update the instance
    +     * configuration, then the existing instance configuration is overwritten
    +     * blindly.
          * 
    * * string etag = 9; @@ -3483,15 +3504,16 @@ public com.google.protobuf.ByteString getEtagBytes() { * *
          * etag is used for optimistic concurrency control as a way
    -     * to help prevent simultaneous updates of a instance config from overwriting
    -     * each other. It is strongly suggested that systems make use of the etag in
    -     * the read-modify-write cycle to perform instance config updates in order to
    -     * avoid race conditions: An etag is returned in the response which contains
    -     * instance configs, and systems are expected to put that etag in the request
    -     * to update instance config to ensure that their change will be applied to
    -     * the same version of the instance config.
    -     * If no etag is provided in the call to update instance config, then the
    -     * existing instance config is overwritten blindly.
    +     * to help prevent simultaneous updates of a instance configuration from
    +     * overwriting each other. It is strongly suggested that systems make use of
    +     * the etag in the read-modify-write cycle to perform instance configuration
    +     * updates in order to avoid race conditions: An etag is returned in the
    +     * response which contains instance configurations, and systems are expected
    +     * to put that etag in the request to update instance configuration to ensure
    +     * that their change is applied to the same version of the instance
    +     * configuration. If no etag is provided in the call to update the instance
    +     * configuration, then the existing instance configuration is overwritten
    +     * blindly.
          * 
    * * string etag = 9; @@ -3513,15 +3535,16 @@ public Builder setEtag(java.lang.String value) { * *
          * etag is used for optimistic concurrency control as a way
    -     * to help prevent simultaneous updates of a instance config from overwriting
    -     * each other. It is strongly suggested that systems make use of the etag in
    -     * the read-modify-write cycle to perform instance config updates in order to
    -     * avoid race conditions: An etag is returned in the response which contains
    -     * instance configs, and systems are expected to put that etag in the request
    -     * to update instance config to ensure that their change will be applied to
    -     * the same version of the instance config.
    -     * If no etag is provided in the call to update instance config, then the
    -     * existing instance config is overwritten blindly.
    +     * to help prevent simultaneous updates of a instance configuration from
    +     * overwriting each other. It is strongly suggested that systems make use of
    +     * the etag in the read-modify-write cycle to perform instance configuration
    +     * updates in order to avoid race conditions: An etag is returned in the
    +     * response which contains instance configurations, and systems are expected
    +     * to put that etag in the request to update instance configuration to ensure
    +     * that their change is applied to the same version of the instance
    +     * configuration. If no etag is provided in the call to update the instance
    +     * configuration, then the existing instance configuration is overwritten
    +     * blindly.
          * 
    * * string etag = 9; @@ -3539,15 +3562,16 @@ public Builder clearEtag() { * *
          * etag is used for optimistic concurrency control as a way
    -     * to help prevent simultaneous updates of a instance config from overwriting
    -     * each other. It is strongly suggested that systems make use of the etag in
    -     * the read-modify-write cycle to perform instance config updates in order to
    -     * avoid race conditions: An etag is returned in the response which contains
    -     * instance configs, and systems are expected to put that etag in the request
    -     * to update instance config to ensure that their change will be applied to
    -     * the same version of the instance config.
    -     * If no etag is provided in the call to update instance config, then the
    -     * existing instance config is overwritten blindly.
    +     * to help prevent simultaneous updates of a instance configuration from
    +     * overwriting each other. It is strongly suggested that systems make use of
    +     * the etag in the read-modify-write cycle to perform instance configuration
    +     * updates in order to avoid race conditions: An etag is returned in the
    +     * response which contains instance configurations, and systems are expected
    +     * to put that etag in the request to update instance configuration to ensure
    +     * that their change is applied to the same version of the instance
    +     * configuration. If no etag is provided in the call to update the instance
    +     * configuration, then the existing instance configuration is overwritten
    +     * blindly.
          * 
    * * string etag = 9; @@ -3754,8 +3778,9 @@ public Builder addLeaderOptionsBytes(com.google.protobuf.ByteString value) { * * *
    -     * Output only. If true, the instance config is being created or updated. If
    -     * false, there are no ongoing operations for the instance config.
    +     * Output only. If true, the instance configuration is being created or
    +     * updated. If false, there are no ongoing operations for the instance
    +     * configuration.
          * 
    * * bool reconciling = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -3770,8 +3795,9 @@ public boolean getReconciling() { * * *
    -     * Output only. If true, the instance config is being created or updated. If
    -     * false, there are no ongoing operations for the instance config.
    +     * Output only. If true, the instance configuration is being created or
    +     * updated. If false, there are no ongoing operations for the instance
    +     * configuration.
          * 
    * * bool reconciling = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -3790,8 +3816,9 @@ public Builder setReconciling(boolean value) { * * *
    -     * Output only. If true, the instance config is being created or updated. If
    -     * false, there are no ongoing operations for the instance config.
    +     * Output only. If true, the instance configuration is being created or
    +     * updated. If false, there are no ongoing operations for the instance
    +     * configuration.
          * 
    * * bool reconciling = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -3810,7 +3837,8 @@ public Builder clearReconciling() { * * *
    -     * Output only. The current instance config state.
    +     * Output only. The current instance configuration state. Applicable only for
    +     * `USER_MANAGED` configurations.
          * 
    * * @@ -3827,7 +3855,8 @@ public int getStateValue() { * * *
    -     * Output only. The current instance config state.
    +     * Output only. The current instance configuration state. Applicable only for
    +     * `USER_MANAGED` configurations.
          * 
    * * @@ -3847,7 +3876,8 @@ public Builder setStateValue(int value) { * * *
    -     * Output only. The current instance config state.
    +     * Output only. The current instance configuration state. Applicable only for
    +     * `USER_MANAGED` configurations.
          * 
    * * @@ -3868,7 +3898,8 @@ public com.google.spanner.admin.instance.v1.InstanceConfig.State getState() { * * *
    -     * Output only. The current instance config state.
    +     * Output only. The current instance configuration state. Applicable only for
    +     * `USER_MANAGED` configurations.
          * 
    * * @@ -3891,7 +3922,8 @@ public Builder setState(com.google.spanner.admin.instance.v1.InstanceConfig.Stat * * *
    -     * Output only. The current instance config state.
    +     * Output only. The current instance configuration state. Applicable only for
    +     * `USER_MANAGED` configurations.
          * 
    * * diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfigOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfigOrBuilder.java index ee8763d47b7..ed36ecec926 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfigOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceConfigOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface InstanceConfigOrBuilder @@ -31,6 +31,8 @@ public interface InstanceConfigOrBuilder * A unique identifier for the instance configuration. Values * are of the form * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`. + * + * User instance configuration must start with `custom-`. * * * string name = 1; @@ -45,6 +47,8 @@ public interface InstanceConfigOrBuilder * A unique identifier for the instance configuration. Values * are of the form * `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`. + * + * User instance configuration must start with `custom-`. * * * string name = 1; @@ -82,8 +86,8 @@ public interface InstanceConfigOrBuilder * * *
    -   * Output only. Whether this instance config is a Google or User Managed
    -   * Configuration.
    +   * Output only. Whether this instance configuration is a Google-managed or
    +   * user-managed configuration.
        * 
    * * @@ -97,8 +101,8 @@ public interface InstanceConfigOrBuilder * * *
    -   * Output only. Whether this instance config is a Google or User Managed
    -   * Configuration.
    +   * Output only. Whether this instance configuration is a Google-managed or
    +   * user-managed configuration.
        * 
    * * @@ -427,15 +431,16 @@ java.lang.String getLabelsOrDefault( * *
        * etag is used for optimistic concurrency control as a way
    -   * to help prevent simultaneous updates of a instance config from overwriting
    -   * each other. It is strongly suggested that systems make use of the etag in
    -   * the read-modify-write cycle to perform instance config updates in order to
    -   * avoid race conditions: An etag is returned in the response which contains
    -   * instance configs, and systems are expected to put that etag in the request
    -   * to update instance config to ensure that their change will be applied to
    -   * the same version of the instance config.
    -   * If no etag is provided in the call to update instance config, then the
    -   * existing instance config is overwritten blindly.
    +   * to help prevent simultaneous updates of a instance configuration from
    +   * overwriting each other. It is strongly suggested that systems make use of
    +   * the etag in the read-modify-write cycle to perform instance configuration
    +   * updates in order to avoid race conditions: An etag is returned in the
    +   * response which contains instance configurations, and systems are expected
    +   * to put that etag in the request to update instance configuration to ensure
    +   * that their change is applied to the same version of the instance
    +   * configuration. If no etag is provided in the call to update the instance
    +   * configuration, then the existing instance configuration is overwritten
    +   * blindly.
        * 
    * * string etag = 9; @@ -448,15 +453,16 @@ java.lang.String getLabelsOrDefault( * *
        * etag is used for optimistic concurrency control as a way
    -   * to help prevent simultaneous updates of a instance config from overwriting
    -   * each other. It is strongly suggested that systems make use of the etag in
    -   * the read-modify-write cycle to perform instance config updates in order to
    -   * avoid race conditions: An etag is returned in the response which contains
    -   * instance configs, and systems are expected to put that etag in the request
    -   * to update instance config to ensure that their change will be applied to
    -   * the same version of the instance config.
    -   * If no etag is provided in the call to update instance config, then the
    -   * existing instance config is overwritten blindly.
    +   * to help prevent simultaneous updates of a instance configuration from
    +   * overwriting each other. It is strongly suggested that systems make use of
    +   * the etag in the read-modify-write cycle to perform instance configuration
    +   * updates in order to avoid race conditions: An etag is returned in the
    +   * response which contains instance configurations, and systems are expected
    +   * to put that etag in the request to update instance configuration to ensure
    +   * that their change is applied to the same version of the instance
    +   * configuration. If no etag is provided in the call to update the instance
    +   * configuration, then the existing instance configuration is overwritten
    +   * blindly.
        * 
    * * string etag = 9; @@ -524,8 +530,9 @@ java.lang.String getLabelsOrDefault( * * *
    -   * Output only. If true, the instance config is being created or updated. If
    -   * false, there are no ongoing operations for the instance config.
    +   * Output only. If true, the instance configuration is being created or
    +   * updated. If false, there are no ongoing operations for the instance
    +   * configuration.
        * 
    * * bool reconciling = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -538,7 +545,8 @@ java.lang.String getLabelsOrDefault( * * *
    -   * Output only. The current instance config state.
    +   * Output only. The current instance configuration state. Applicable only for
    +   * `USER_MANAGED` configurations.
        * 
    * * @@ -552,7 +560,8 @@ java.lang.String getLabelsOrDefault( * * *
    -   * Output only. The current instance config state.
    +   * Output only. The current instance configuration state. Applicable only for
    +   * `USER_MANAGED` configurations.
        * 
    * * diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceOrBuilder.java index 1ca3c2a1c29..db7d23f94e0 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstanceOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface InstanceOrBuilder @@ -121,18 +121,25 @@ public interface InstanceOrBuilder * * *
    -   * The number of nodes allocated to this instance. At most one of either
    -   * node_count or processing_units should be present in the message.
    +   * The number of nodes allocated to this instance. At most, one of either
    +   * `node_count` or `processing_units` should be present in the message.
        *
    -   * Users can set the node_count field to specify the target number of nodes
    +   * Users can set the `node_count` field to specify the target number of nodes
        * allocated to the instance.
        *
    -   * This may be zero in API responses for instances that are not yet in state
    -   * `READY`.
    +   * If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY`
    +   * field and reflects the current number of nodes allocated to the instance.
        *
    -   * See [the
    -   * documentation](https://cloud.google.com/spanner/docs/compute-capacity)
    -   * for more information about nodes and processing units.
    +   * This might be zero in API responses for instances that are not yet in the
    +   * `READY` state.
    +   *
    +   * If the instance has varying node count across replicas (achieved by
    +   * setting asymmetric_autoscaling_options in autoscaling config), the
    +   * node_count here is the maximum node count across all replicas.
    +   *
    +   * For more information, see
    +   * [Compute capacity, nodes, and processing
    +   * units](https://cloud.google.com/spanner/docs/compute-capacity).
        * 
    * * int32 node_count = 5; @@ -145,18 +152,27 @@ public interface InstanceOrBuilder * * *
    -   * The number of processing units allocated to this instance. At most one of
    -   * processing_units or node_count should be present in the message.
    +   * The number of processing units allocated to this instance. At most, one of
    +   * either `processing_units` or `node_count` should be present in the message.
        *
    -   * Users can set the processing_units field to specify the target number of
    +   * Users can set the `processing_units` field to specify the target number of
        * processing units allocated to the instance.
        *
    -   * This may be zero in API responses for instances that are not yet in state
    -   * `READY`.
    +   * If autoscaling is enabled, `processing_units` is treated as an
    +   * `OUTPUT_ONLY` field and reflects the current number of processing units
    +   * allocated to the instance.
    +   *
    +   * This might be zero in API responses for instances that are not yet in the
    +   * `READY` state.
    +   *
    +   * If the instance has varying processing units per replica
    +   * (achieved by setting asymmetric_autoscaling_options in autoscaling config),
    +   * the processing_units here is the maximum processing units across all
    +   * replicas.
        *
    -   * See [the
    -   * documentation](https://cloud.google.com/spanner/docs/compute-capacity)
    -   * for more information about nodes and processing units.
    +   * For more information, see
    +   * [Compute capacity, nodes and processing
    +   * units](https://cloud.google.com/spanner/docs/compute-capacity).
        * 
    * * int32 processing_units = 9; @@ -165,6 +181,80 @@ public interface InstanceOrBuilder */ int getProcessingUnits(); + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getReplicaComputeCapacityList(); + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity getReplicaComputeCapacity(int index); + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getReplicaComputeCapacityCount(); + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getReplicaComputeCapacityOrBuilderList(); + /** + * + * + *
    +   * Output only. Lists the compute capacity per ReplicaSelection. A replica
    +   * selection identifies a set of replicas with common properties. Replicas
    +   * identified by a ReplicaSelection are scaled with the same compute capacity.
    +   * 
    + * + * + * repeated .google.spanner.admin.instance.v1.ReplicaComputeCapacity replica_compute_capacity = 19 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder + getReplicaComputeCapacityOrBuilder(int index); + /** * * @@ -536,4 +626,33 @@ java.lang.String getLabelsOrDefault( *
    */ com.google.protobuf.TimestampOrBuilder getUpdateTimeOrBuilder(); + + /** + * + * + *
    +   * Optional. The `Edition` of the current instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for edition. + */ + int getEditionValue(); + /** + * + * + *
    +   * Optional. The `Edition` of the current instance.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.Instance.Edition edition = 20 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The edition. + */ + com.google.spanner.admin.instance.v1.Instance.Edition getEdition(); } diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartition.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartition.java index 5c765054240..bd5ea351202 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartition.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartition.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartitionOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartitionOrBuilder.java index 24f2d00bbfa..8299ee692be 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartitionOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/InstancePartitionOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface InstancePartitionOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsRequest.java index b27aea20d29..7790d99d7a3 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsRequest.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** @@ -76,7 +76,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
    -   * Required. The project of the instance config operations.
    +   * Required. The project of the instance configuration operations.
        * Values are of the form `projects/<project>`.
        * 
    * @@ -102,7 +102,7 @@ public java.lang.String getParent() { * * *
    -   * Required. The project of the instance config operations.
    +   * Required. The project of the instance configuration operations.
        * Values are of the form `projects/<project>`.
        * 
    * @@ -173,7 +173,7 @@ public com.google.protobuf.ByteString getParentBytes() { * `(error:*)` - Return operations where: * * The operation's metadata type is * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - * * The instance config name contains "custom-config". + * * The instance configuration name contains "custom-config". * * The operation started before 2021-03-28T14:50:00Z. * * The operation resulted in an error. * @@ -238,7 +238,7 @@ public java.lang.String getFilter() { * `(error:*)` - Return operations where: * * The operation's metadata type is * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - * * The instance config name contains "custom-config". + * * The instance configuration name contains "custom-config". * * The operation started before 2021-03-28T14:50:00Z. * * The operation resulted in an error. * @@ -769,7 +769,7 @@ public Builder mergeFrom( * * *
    -     * Required. The project of the instance config operations.
    +     * Required. The project of the instance configuration operations.
          * Values are of the form `projects/<project>`.
          * 
    * @@ -794,7 +794,7 @@ public java.lang.String getParent() { * * *
    -     * Required. The project of the instance config operations.
    +     * Required. The project of the instance configuration operations.
          * Values are of the form `projects/<project>`.
          * 
    * @@ -819,7 +819,7 @@ public com.google.protobuf.ByteString getParentBytes() { * * *
    -     * Required. The project of the instance config operations.
    +     * Required. The project of the instance configuration operations.
          * Values are of the form `projects/<project>`.
          * 
    * @@ -843,7 +843,7 @@ public Builder setParent(java.lang.String value) { * * *
    -     * Required. The project of the instance config operations.
    +     * Required. The project of the instance configuration operations.
          * Values are of the form `projects/<project>`.
          * 
    * @@ -863,7 +863,7 @@ public Builder clearParent() { * * *
    -     * Required. The project of the instance config operations.
    +     * Required. The project of the instance configuration operations.
          * Values are of the form `projects/<project>`.
          * 
    * @@ -930,7 +930,7 @@ public Builder setParentBytes(com.google.protobuf.ByteString value) { * `(error:*)` - Return operations where: * * The operation's metadata type is * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - * * The instance config name contains "custom-config". + * * The instance configuration name contains "custom-config". * * The operation started before 2021-03-28T14:50:00Z. * * The operation resulted in an error. * @@ -994,7 +994,7 @@ public java.lang.String getFilter() { * `(error:*)` - Return operations where: * * The operation's metadata type is * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - * * The instance config name contains "custom-config". + * * The instance configuration name contains "custom-config". * * The operation started before 2021-03-28T14:50:00Z. * * The operation resulted in an error. * @@ -1058,7 +1058,7 @@ public com.google.protobuf.ByteString getFilterBytes() { * `(error:*)` - Return operations where: * * The operation's metadata type is * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - * * The instance config name contains "custom-config". + * * The instance configuration name contains "custom-config". * * The operation started before 2021-03-28T14:50:00Z. * * The operation resulted in an error. * @@ -1121,7 +1121,7 @@ public Builder setFilter(java.lang.String value) { * `(error:*)` - Return operations where: * * The operation's metadata type is * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - * * The instance config name contains "custom-config". + * * The instance configuration name contains "custom-config". * * The operation started before 2021-03-28T14:50:00Z. * * The operation resulted in an error. 
* @@ -1180,7 +1180,7 @@ public Builder clearFilter() { * `(error:*)` - Return operations where: * * The operation's metadata type is * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - * * The instance config name contains "custom-config". + * * The instance configuration name contains "custom-config". * * The operation started before 2021-03-28T14:50:00Z. * * The operation resulted in an error. * diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsRequestOrBuilder.java index 036da7e53d4..8aa43b97043 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface ListInstanceConfigOperationsRequestOrBuilder @@ -28,7 +28,7 @@ public interface ListInstanceConfigOperationsRequestOrBuilder * * *
    -   * Required. The project of the instance config operations.
    +   * Required. The project of the instance configuration operations.
        * Values are of the form `projects/<project>`.
        * 
    * @@ -43,7 +43,7 @@ public interface ListInstanceConfigOperationsRequestOrBuilder * * *
    -   * Required. The project of the instance config operations.
    +   * Required. The project of the instance configuration operations.
        * Values are of the form `projects/<project>`.
        * 
    * @@ -99,7 +99,7 @@ public interface ListInstanceConfigOperationsRequestOrBuilder * `(error:*)` - Return operations where: * * The operation's metadata type is * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - * * The instance config name contains "custom-config". + * * The instance configuration name contains "custom-config". * * The operation started before 2021-03-28T14:50:00Z. * * The operation resulted in an error. * @@ -153,7 +153,7 @@ public interface ListInstanceConfigOperationsRequestOrBuilder * `(error:*)` - Return operations where: * * The operation's metadata type is * [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - * * The instance config name contains "custom-config". + * * The instance configuration name contains "custom-config". * * The operation started before 2021-03-28T14:50:00Z. * * The operation resulted in an error. * diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsResponse.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsResponse.java index 9ff2077993b..eb10ad4f5b8 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsResponse.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsResponse.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** @@ -76,9 +76,9 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
    -   * The list of matching instance config [long-running
    +   * The list of matching instance configuration [long-running
        * operations][google.longrunning.Operation]. Each operation's name will be
    -   * prefixed by the instance config's name. The operation's
    +   * prefixed by the name of the instance configuration. The operation's
        * [metadata][google.longrunning.Operation.metadata] field type
        * `metadata.type_url` describes the type of the metadata.
        * 
    @@ -93,9 +93,9 @@ public java.util.List getOperationsList() { * * *
    -   * The list of matching instance config [long-running
    +   * The list of matching instance configuration [long-running
        * operations][google.longrunning.Operation]. Each operation's name will be
    -   * prefixed by the instance config's name. The operation's
    +   * prefixed by the name of the instance configuration. The operation's
        * [metadata][google.longrunning.Operation.metadata] field type
        * `metadata.type_url` describes the type of the metadata.
        * 
    @@ -111,9 +111,9 @@ public java.util.List getOperationsList() { * * *
    -   * The list of matching instance config [long-running
    +   * The list of matching instance configuration [long-running
        * operations][google.longrunning.Operation]. Each operation's name will be
    -   * prefixed by the instance config's name. The operation's
    +   * prefixed by the name of the instance configuration. The operation's
        * [metadata][google.longrunning.Operation.metadata] field type
        * `metadata.type_url` describes the type of the metadata.
        * 
    @@ -128,9 +128,9 @@ public int getOperationsCount() { * * *
    -   * The list of matching instance config [long-running
    +   * The list of matching instance configuration [long-running
        * operations][google.longrunning.Operation]. Each operation's name will be
    -   * prefixed by the instance config's name. The operation's
    +   * prefixed by the name of the instance configuration. The operation's
        * [metadata][google.longrunning.Operation.metadata] field type
        * `metadata.type_url` describes the type of the metadata.
        * 
    @@ -145,9 +145,9 @@ public com.google.longrunning.Operation getOperations(int index) { * * *
    -   * The list of matching instance config [long-running
    +   * The list of matching instance configuration [long-running
        * operations][google.longrunning.Operation]. Each operation's name will be
    -   * prefixed by the instance config's name. The operation's
    +   * prefixed by the name of the instance configuration. The operation's
        * [metadata][google.longrunning.Operation.metadata] field type
        * `metadata.type_url` describes the type of the metadata.
        * 
    @@ -662,9 +662,9 @@ private void ensureOperationsIsMutable() { * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -682,9 +682,9 @@ public java.util.List getOperationsList() { * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -702,9 +702,9 @@ public int getOperationsCount() { * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -722,9 +722,9 @@ public com.google.longrunning.Operation getOperations(int index) { * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -748,9 +748,9 @@ public Builder setOperations(int index, com.google.longrunning.Operation value) * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -772,9 +772,9 @@ public Builder setOperations( * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -798,9 +798,9 @@ public Builder addOperations(com.google.longrunning.Operation value) { * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -824,9 +824,9 @@ public Builder addOperations(int index, com.google.longrunning.Operation value) * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -847,9 +847,9 @@ public Builder addOperations(com.google.longrunning.Operation.Builder builderFor * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -871,9 +871,9 @@ public Builder addOperations( * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -895,9 +895,9 @@ public Builder addAllOperations( * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -918,9 +918,9 @@ public Builder clearOperations() { * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -941,9 +941,9 @@ public Builder removeOperations(int index) { * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -957,9 +957,9 @@ public com.google.longrunning.Operation.Builder getOperationsBuilder(int index) * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -977,9 +977,9 @@ public com.google.longrunning.OperationOrBuilder getOperationsOrBuilder(int inde * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -998,9 +998,9 @@ public com.google.longrunning.OperationOrBuilder getOperationsOrBuilder(int inde * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -1015,9 +1015,9 @@ public com.google.longrunning.Operation.Builder addOperationsBuilder() { * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    @@ -1032,9 +1032,9 @@ public com.google.longrunning.Operation.Builder addOperationsBuilder(int index) * * *
    -     * The list of matching instance config [long-running
    +     * The list of matching instance configuration [long-running
          * operations][google.longrunning.Operation]. Each operation's name will be
    -     * prefixed by the instance config's name. The operation's
    +     * prefixed by the name of the instance configuration. The operation's
          * [metadata][google.longrunning.Operation.metadata] field type
          * `metadata.type_url` describes the type of the metadata.
          * 
    diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsResponseOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsResponseOrBuilder.java index 74a0e09c69b..0636115bd47 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsResponseOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigOperationsResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface ListInstanceConfigOperationsResponseOrBuilder @@ -28,9 +28,9 @@ public interface ListInstanceConfigOperationsResponseOrBuilder * * *
    -   * The list of matching instance config [long-running
    +   * The list of matching instance configuration [long-running
        * operations][google.longrunning.Operation]. Each operation's name will be
    -   * prefixed by the instance config's name. The operation's
    +   * prefixed by the name of the instance configuration. The operation's
        * [metadata][google.longrunning.Operation.metadata] field type
        * `metadata.type_url` describes the type of the metadata.
        * 
    @@ -42,9 +42,9 @@ public interface ListInstanceConfigOperationsResponseOrBuilder * * *
    -   * The list of matching instance config [long-running
    +   * The list of matching instance configuration [long-running
        * operations][google.longrunning.Operation]. Each operation's name will be
    -   * prefixed by the instance config's name. The operation's
    +   * prefixed by the name of the instance configuration. The operation's
        * [metadata][google.longrunning.Operation.metadata] field type
        * `metadata.type_url` describes the type of the metadata.
        * 
    @@ -56,9 +56,9 @@ public interface ListInstanceConfigOperationsResponseOrBuilder * * *
    -   * The list of matching instance config [long-running
    +   * The list of matching instance configuration [long-running
        * operations][google.longrunning.Operation]. Each operation's name will be
    -   * prefixed by the instance config's name. The operation's
    +   * prefixed by the name of the instance configuration. The operation's
        * [metadata][google.longrunning.Operation.metadata] field type
        * `metadata.type_url` describes the type of the metadata.
        * 
    @@ -70,9 +70,9 @@ public interface ListInstanceConfigOperationsResponseOrBuilder * * *
    -   * The list of matching instance config [long-running
    +   * The list of matching instance configuration [long-running
        * operations][google.longrunning.Operation]. Each operation's name will be
    -   * prefixed by the instance config's name. The operation's
    +   * prefixed by the name of the instance configuration. The operation's
        * [metadata][google.longrunning.Operation.metadata] field type
        * `metadata.type_url` describes the type of the metadata.
        * 
    @@ -84,9 +84,9 @@ public interface ListInstanceConfigOperationsResponseOrBuilder * * *
    -   * The list of matching instance config [long-running
    +   * The list of matching instance configuration [long-running
        * operations][google.longrunning.Operation]. Each operation's name will be
    -   * prefixed by the instance config's name. The operation's
    +   * prefixed by the name of the instance configuration. The operation's
        * [metadata][google.longrunning.Operation.metadata] field type
        * `metadata.type_url` describes the type of the metadata.
        * 
    diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsRequest.java index 7e2d4cb7493..43e0ae98c17 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsRequest.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsRequestOrBuilder.java index 400d000f27b..9a5586af064 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface ListInstanceConfigsRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsResponse.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsResponse.java index fba36077b04..bd3e5135668 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsResponse.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsResponse.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsResponseOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsResponseOrBuilder.java index 6aa12ea15ae..e9d4ae5bad6 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsResponseOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstanceConfigsResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface ListInstanceConfigsResponseOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsRequest.java index c27e8bdd6c3..0ed4d357a7e 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsRequest.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsRequestOrBuilder.java index 7a14fa09645..417d98e4217 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface ListInstancePartitionOperationsRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsResponse.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsResponse.java index 69683f9e8d7..6a146626001 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsResponse.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsResponse.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsResponseOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsResponseOrBuilder.java index 697ab046f39..532d290414a 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsResponseOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionOperationsResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface ListInstancePartitionOperationsResponseOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsRequest.java index d985906ce1d..dd4b5784fbd 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsRequest.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsRequestOrBuilder.java index 7d457eb1ee2..c305bad2129 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface ListInstancePartitionsRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsResponse.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsResponse.java index 232e74b7c3b..77fadf1fe14 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsResponse.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsResponse.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsResponseOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsResponseOrBuilder.java index c3c66230d90..2ad1ffb742a 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsResponseOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancePartitionsResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface ListInstancePartitionsResponseOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesRequest.java index 24de0923825..e5aff200595 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesRequest.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesRequestOrBuilder.java index 90e17edfedf..b983f6f3e46 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesRequestOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface ListInstancesRequestOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesResponse.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesResponse.java index f8974780add..34798359aa4 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesResponse.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesResponse.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesResponseOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesResponseOrBuilder.java index 6d6269f2c0e..6463e3b85c2 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesResponseOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ListInstancesResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface ListInstancesResponseOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceMetadata.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceMetadata.java new file mode 100644 index 00000000000..6fcf6980756 --- /dev/null +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceMetadata.java @@ -0,0 +1,1228 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * Metadata type for the operation returned by
    + * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.MoveInstanceMetadata} + */ +public final class MoveInstanceMetadata extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.MoveInstanceMetadata) + MoveInstanceMetadataOrBuilder { + private static final long serialVersionUID = 0L; + // Use MoveInstanceMetadata.newBuilder() to construct. + private MoveInstanceMetadata(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private MoveInstanceMetadata() { + targetConfig_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new MoveInstanceMetadata(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.MoveInstanceMetadata.class, + com.google.spanner.admin.instance.v1.MoveInstanceMetadata.Builder.class); + } + + private int bitField0_; + public static final int TARGET_CONFIG_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object targetConfig_ = ""; + /** + * + * + *
    +   * The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * string target_config = 1; + * + * @return The targetConfig. + */ + @java.lang.Override + public java.lang.String getTargetConfig() { + java.lang.Object ref = targetConfig_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + targetConfig_ = s; + return s; + } + } + /** + * + * + *
    +   * The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * string target_config = 1; + * + * @return The bytes for targetConfig. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTargetConfigBytes() { + java.lang.Object ref = targetConfig_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + targetConfig_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROGRESS_FIELD_NUMBER = 2; + private com.google.spanner.admin.instance.v1.OperationProgress progress_; + /** + * + * + *
    +   * The progress of the
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +   * operation.
    +   * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +   * is reset when cancellation is requested.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + @java.lang.Override + public boolean hasProgress() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
    +   * The progress of the
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +   * operation.
    +   * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +   * is reset when cancellation is requested.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.OperationProgress getProgress() { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } + /** + * + * + *
    +   * The progress of the
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +   * operation.
    +   * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +   * is reset when cancellation is requested.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.OperationProgressOrBuilder getProgressOrBuilder() { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } + + public static final int CANCEL_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp cancelTime_; + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + @java.lang.Override + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getCancelTime() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + return cancelTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : cancelTime_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(targetConfig_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, targetConfig_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(2, getProgress()); + } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getCancelTime()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(targetConfig_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, targetConfig_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getProgress()); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCancelTime()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.MoveInstanceMetadata)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.MoveInstanceMetadata 
other = + (com.google.spanner.admin.instance.v1.MoveInstanceMetadata) obj; + + if (!getTargetConfig().equals(other.getTargetConfig())) return false; + if (hasProgress() != other.hasProgress()) return false; + if (hasProgress()) { + if (!getProgress().equals(other.getProgress())) return false; + } + if (hasCancelTime() != other.hasCancelTime()) return false; + if (hasCancelTime()) { + if (!getCancelTime().equals(other.getCancelTime())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + TARGET_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getTargetConfig().hashCode(); + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress().hashCode(); + } + if (hasCancelTime()) { + hash = (37 * hash) + CANCEL_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCancelTime().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata 
parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + com.google.protobuf.CodedInputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.MoveInstanceMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * Metadata type for the operation returned by
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.MoveInstanceMetadata} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.MoveInstanceMetadata) + com.google.spanner.admin.instance.v1.MoveInstanceMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.MoveInstanceMetadata.class, + com.google.spanner.admin.instance.v1.MoveInstanceMetadata.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.MoveInstanceMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getProgressFieldBuilder(); + getCancelTimeFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + targetConfig_ = ""; + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceMetadata getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.MoveInstanceMetadata.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceMetadata build() { + com.google.spanner.admin.instance.v1.MoveInstanceMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceMetadata buildPartial() { + com.google.spanner.admin.instance.v1.MoveInstanceMetadata result = + new com.google.spanner.admin.instance.v1.MoveInstanceMetadata(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.MoveInstanceMetadata result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.targetConfig_ = targetConfig_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000002) != 0)) { + result.progress_ = progressBuilder_ == null ? progress_ : progressBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.cancelTime_ = cancelTimeBuilder_ == null ? 
cancelTime_ : cancelTimeBuilder_.build(); + to_bitField0_ |= 0x00000002; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.MoveInstanceMetadata) { + return mergeFrom((com.google.spanner.admin.instance.v1.MoveInstanceMetadata) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.MoveInstanceMetadata other) { + if (other == com.google.spanner.admin.instance.v1.MoveInstanceMetadata.getDefaultInstance()) + return this; + if (!other.getTargetConfig().isEmpty()) { + targetConfig_ = other.targetConfig_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (other.hasProgress()) { + mergeProgress(other.getProgress()); + } + if (other.hasCancelTime()) { + mergeCancelTime(other.getCancelTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public 
final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + targetConfig_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + input.readMessage(getProgressFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + input.readMessage(getCancelTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object targetConfig_ = ""; + /** + * + * + *
    +     * The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * string target_config = 1; + * + * @return The targetConfig. + */ + public java.lang.String getTargetConfig() { + java.lang.Object ref = targetConfig_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + targetConfig_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * string target_config = 1; + * + * @return The bytes for targetConfig. + */ + public com.google.protobuf.ByteString getTargetConfigBytes() { + java.lang.Object ref = targetConfig_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + targetConfig_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * string target_config = 1; + * + * @param value The targetConfig to set. + * @return This builder for chaining. + */ + public Builder setTargetConfig(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + targetConfig_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +     * The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * string target_config = 1; + * + * @return This builder for chaining. + */ + public Builder clearTargetConfig() { + targetConfig_ = getDefaultInstance().getTargetConfig(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
    +     * The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * string target_config = 1; + * + * @param value The bytes for targetConfig to set. + * @return This builder for chaining. + */ + public Builder setTargetConfigBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + targetConfig_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private com.google.spanner.admin.instance.v1.OperationProgress progress_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.instance.v1.OperationProgress, + com.google.spanner.admin.instance.v1.OperationProgress.Builder, + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder> + progressBuilder_; + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + public com.google.spanner.admin.instance.v1.OperationProgress getProgress() { + if (progressBuilder_ == null) { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } else { + return progressBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder setProgress(com.google.spanner.admin.instance.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + progress_ = value; + } else { + progressBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder setProgress( + com.google.spanner.admin.instance.v1.OperationProgress.Builder builderForValue) { + if (progressBuilder_ == null) { + progress_ = builderForValue.build(); + } else { + progressBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder mergeProgress(com.google.spanner.admin.instance.v1.OperationProgress value) { + if (progressBuilder_ == null) { + if (((bitField0_ & 0x00000002) != 0) + && progress_ != null + && progress_ + != com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance()) { + getProgressBuilder().mergeFrom(value); + } else { + progress_ = value; + } + } else { + progressBuilder_.mergeFrom(value); + } + if (progress_ != null) { + bitField0_ |= 0x00000002; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000002); + progress_ = null; + if (progressBuilder_ != null) { + progressBuilder_.dispose(); + progressBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public com.google.spanner.admin.instance.v1.OperationProgress.Builder getProgressBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getProgressFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + public com.google.spanner.admin.instance.v1.OperationProgressOrBuilder getProgressOrBuilder() { + if (progressBuilder_ != null) { + return progressBuilder_.getMessageOrBuilder(); + } else { + return progress_ == null + ? com.google.spanner.admin.instance.v1.OperationProgress.getDefaultInstance() + : progress_; + } + } + /** + * + * + *
    +     * The progress of the
    +     * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +     * operation.
    +     * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +     * is reset when cancellation is requested.
    +     * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.instance.v1.OperationProgress, + com.google.spanner.admin.instance.v1.OperationProgress.Builder, + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder> + getProgressFieldBuilder() { + if (progressBuilder_ == null) { + progressBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.instance.v1.OperationProgress, + com.google.spanner.admin.instance.v1.OperationProgress.Builder, + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder>( + getProgress(), getParentForChildren(), isClean()); + progress_ = null; + } + return progressBuilder_; + } + + private com.google.protobuf.Timestamp cancelTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + cancelTimeBuilder_; + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + public boolean hasCancelTime() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + public com.google.protobuf.Timestamp getCancelTime() { + if (cancelTimeBuilder_ == null) { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } else { + return cancelTimeBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + cancelTime_ = value; + } else { + cancelTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder setCancelTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (cancelTimeBuilder_ == null) { + cancelTime_ = builderForValue.build(); + } else { + cancelTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder mergeCancelTime(com.google.protobuf.Timestamp value) { + if (cancelTimeBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && cancelTime_ != null + && cancelTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getCancelTimeBuilder().mergeFrom(value); + } else { + cancelTime_ = value; + } + } else { + cancelTimeBuilder_.mergeFrom(value); + } + if (cancelTime_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public Builder clearCancelTime() { + bitField0_ = (bitField0_ & ~0x00000004); + cancelTime_ = null; + if (cancelTimeBuilder_ != null) { + cancelTimeBuilder_.dispose(); + cancelTimeBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.Timestamp.Builder getCancelTimeBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getCancelTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + public com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder() { + if (cancelTimeBuilder_ != null) { + return cancelTimeBuilder_.getMessageOrBuilder(); + } else { + return cancelTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : cancelTime_; + } + } + /** + * + * + *
    +     * The time at which this operation was cancelled.
    +     * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCancelTimeFieldBuilder() { + if (cancelTimeBuilder_ == null) { + cancelTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCancelTime(), getParentForChildren(), isClean()); + cancelTime_ = null; + } + return cancelTimeBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.MoveInstanceMetadata) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.MoveInstanceMetadata) + private static final com.google.spanner.admin.instance.v1.MoveInstanceMetadata DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.MoveInstanceMetadata(); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MoveInstanceMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + 
throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceMetadataOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceMetadataOrBuilder.java new file mode 100644 index 00000000000..85cb925e2ad --- /dev/null +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceMetadataOrBuilder.java @@ -0,0 +1,135 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.instance.v1; + +public interface MoveInstanceMetadataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.MoveInstanceMetadata) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * string target_config = 1; + * + * @return The targetConfig. + */ + java.lang.String getTargetConfig(); + /** + * + * + *
    +   * The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * string target_config = 1; + * + * @return The bytes for targetConfig. + */ + com.google.protobuf.ByteString getTargetConfigBytes(); + + /** + * + * + *
    +   * The progress of the
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +   * operation.
    +   * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +   * is reset when cancellation is requested.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return Whether the progress field is set. + */ + boolean hasProgress(); + /** + * + * + *
    +   * The progress of the
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +   * operation.
    +   * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +   * is reset when cancellation is requested.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + * + * @return The progress. + */ + com.google.spanner.admin.instance.v1.OperationProgress getProgress(); + /** + * + * + *
    +   * The progress of the
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +   * operation.
    +   * [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +   * is reset when cancellation is requested.
    +   * 
    + * + * .google.spanner.admin.instance.v1.OperationProgress progress = 2; + */ + com.google.spanner.admin.instance.v1.OperationProgressOrBuilder getProgressOrBuilder(); + + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return Whether the cancelTime field is set. + */ + boolean hasCancelTime(); + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + * + * @return The cancelTime. + */ + com.google.protobuf.Timestamp getCancelTime(); + /** + * + * + *
    +   * The time at which this operation was cancelled.
    +   * 
    + * + * .google.protobuf.Timestamp cancel_time = 3; + */ + com.google.protobuf.TimestampOrBuilder getCancelTimeOrBuilder(); +} diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceRequest.java new file mode 100644 index 00000000000..9083215eb78 --- /dev/null +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceRequest.java @@ -0,0 +1,852 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The request for
    + * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.MoveInstanceRequest} + */ +public final class MoveInstanceRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.MoveInstanceRequest) + MoveInstanceRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use MoveInstanceRequest.newBuilder() to construct. + private MoveInstanceRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private MoveInstanceRequest() { + name_ = ""; + targetConfig_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new MoveInstanceRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.MoveInstanceRequest.class, + com.google.spanner.admin.instance.v1.MoveInstanceRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object name_ = ""; + /** + * + * + *
    +   * Required. The instance to move.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + @java.lang.Override + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
    +   * Required. The instance to move.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + @java.lang.Override + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TARGET_CONFIG_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object targetConfig_ = ""; + /** + * + * + *
    +   * Required. The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The targetConfig. + */ + @java.lang.Override + public java.lang.String getTargetConfig() { + java.lang.Object ref = targetConfig_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + targetConfig_ = s; + return s; + } + } + /** + * + * + *
    +   * Required. The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for targetConfig. + */ + @java.lang.Override + public com.google.protobuf.ByteString getTargetConfigBytes() { + java.lang.Object ref = targetConfig_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + targetConfig_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(targetConfig_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, targetConfig_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(targetConfig_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, targetConfig_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
com.google.spanner.admin.instance.v1.MoveInstanceRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.MoveInstanceRequest other = + (com.google.spanner.admin.instance.v1.MoveInstanceRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!getTargetConfig().equals(other.getTargetConfig())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + TARGET_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getTargetConfig().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.MoveInstanceRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * The request for
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.MoveInstanceRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.MoveInstanceRequest) + com.google.spanner.admin.instance.v1.MoveInstanceRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.MoveInstanceRequest.class, + com.google.spanner.admin.instance.v1.MoveInstanceRequest.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.MoveInstanceRequest.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + name_ = ""; + targetConfig_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceRequest getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.MoveInstanceRequest.getDefaultInstance(); + } + + @java.lang.Override + public 
com.google.spanner.admin.instance.v1.MoveInstanceRequest build() { + com.google.spanner.admin.instance.v1.MoveInstanceRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceRequest buildPartial() { + com.google.spanner.admin.instance.v1.MoveInstanceRequest result = + new com.google.spanner.admin.instance.v1.MoveInstanceRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.MoveInstanceRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.name_ = name_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.targetConfig_ = targetConfig_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
com.google.spanner.admin.instance.v1.MoveInstanceRequest) { + return mergeFrom((com.google.spanner.admin.instance.v1.MoveInstanceRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.MoveInstanceRequest other) { + if (other == com.google.spanner.admin.instance.v1.MoveInstanceRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getTargetConfig().isEmpty()) { + targetConfig_ = other.targetConfig_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + name_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + targetConfig_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * + * + *
    +     * Required. The instance to move.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * Required. The instance to move.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * Required. The instance to move.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The instance to move.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + name_ = getDefaultInstance().getName(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The instance to move.
    +     * Values are of the form `projects/<project>/instances/<instance>`.
    +     * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + name_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object targetConfig_ = ""; + /** + * + * + *
    +     * Required. The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The targetConfig. + */ + public java.lang.String getTargetConfig() { + java.lang.Object ref = targetConfig_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + targetConfig_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * Required. The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for targetConfig. + */ + public com.google.protobuf.ByteString getTargetConfigBytes() { + java.lang.Object ref = targetConfig_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + targetConfig_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * Required. The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The targetConfig to set. + * @return This builder for chaining. + */ + public Builder setTargetConfig(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + targetConfig_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearTargetConfig() { + targetConfig_ = getDefaultInstance().getTargetConfig(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. The target instance configuration where to move the instance.
    +     * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +     * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for targetConfig to set. + * @return This builder for chaining. + */ + public Builder setTargetConfigBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + targetConfig_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.MoveInstanceRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.MoveInstanceRequest) + private static final com.google.spanner.admin.instance.v1.MoveInstanceRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.MoveInstanceRequest(); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MoveInstanceRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) 
{ + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceRequestOrBuilder.java new file mode 100644 index 00000000000..8c573fdd284 --- /dev/null +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceRequestOrBuilder.java @@ -0,0 +1,88 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.instance.v1; + +public interface MoveInstanceRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.MoveInstanceRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. The instance to move.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
    +   * Required. The instance to move.
    +   * Values are of the form `projects/<project>/instances/<instance>`.
    +   * 
    + * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
    +   * Required. The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The targetConfig. + */ + java.lang.String getTargetConfig(); + /** + * + * + *
    +   * Required. The target instance configuration where to move the instance.
    +   * Values are of the form `projects/<project>/instanceConfigs/<config>`.
    +   * 
    + * + * + * string target_config = 2 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for targetConfig. + */ + com.google.protobuf.ByteString getTargetConfigBytes(); +} diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceResponse.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceResponse.java new file mode 100644 index 00000000000..cbc73eef087 --- /dev/null +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceResponse.java @@ -0,0 +1,435 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * The response for
    + * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.MoveInstanceResponse} + */ +public final class MoveInstanceResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.MoveInstanceResponse) + MoveInstanceResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use MoveInstanceResponse.newBuilder() to construct. + private MoveInstanceResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private MoveInstanceResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new MoveInstanceResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.MoveInstanceResponse.class, + com.google.spanner.admin.instance.v1.MoveInstanceResponse.Builder.class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int 
size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.MoveInstanceResponse)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.MoveInstanceResponse other = + (com.google.spanner.admin.instance.v1.MoveInstanceResponse) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.MoveInstanceResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * The response for
    +   * [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.MoveInstanceResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.MoveInstanceResponse) + com.google.spanner.admin.instance.v1.MoveInstanceResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.MoveInstanceResponse.class, + com.google.spanner.admin.instance.v1.MoveInstanceResponse.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.MoveInstanceResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceResponse getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.MoveInstanceResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceResponse build() { + 
com.google.spanner.admin.instance.v1.MoveInstanceResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceResponse buildPartial() { + com.google.spanner.admin.instance.v1.MoveInstanceResponse result = + new com.google.spanner.admin.instance.v1.MoveInstanceResponse(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.MoveInstanceResponse) { + return mergeFrom((com.google.spanner.admin.instance.v1.MoveInstanceResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.MoveInstanceResponse other) { + if (other == com.google.spanner.admin.instance.v1.MoveInstanceResponse.getDefaultInstance()) + return this; + 
this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.MoveInstanceResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.MoveInstanceResponse) + private static final com.google.spanner.admin.instance.v1.MoveInstanceResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.MoveInstanceResponse(); + } + + public static com.google.spanner.admin.instance.v1.MoveInstanceResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MoveInstanceResponse parsePartialFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.MoveInstanceResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceResponseOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceResponseOrBuilder.java new file mode 100644 index 00000000000..ecb879953c7 --- /dev/null +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/MoveInstanceResponseOrBuilder.java @@ -0,0 +1,25 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.instance.v1; + +public interface MoveInstanceResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.MoveInstanceResponse) + com.google.protobuf.MessageOrBuilder {} diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/OperationProgress.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/OperationProgress.java index 5cac14590ba..55a34ebc5fb 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/OperationProgress.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/OperationProgress.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/common.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/OperationProgressOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/OperationProgressOrBuilder.java index a2cbadfa958..dbf7800b404 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/OperationProgressOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/OperationProgressOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/common.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface OperationProgressOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaComputeCapacity.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaComputeCapacity.java new file mode 100644 index 00000000000..1e64f6b80b9 --- /dev/null +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaComputeCapacity.java @@ -0,0 +1,1154 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * ReplicaComputeCapacity describes the amount of server resources that are
    + * allocated to each replica identified by the replica selection.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ReplicaComputeCapacity} + */ +public final class ReplicaComputeCapacity extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.ReplicaComputeCapacity) + ReplicaComputeCapacityOrBuilder { + private static final long serialVersionUID = 0L; + // Use ReplicaComputeCapacity.newBuilder() to construct. + private ReplicaComputeCapacity(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReplicaComputeCapacity() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReplicaComputeCapacity(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.class, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder.class); + } + + private int bitField0_; + private int computeCapacityCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object computeCapacity_; + + public enum ComputeCapacityCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + NODE_COUNT(2), + PROCESSING_UNITS(3), + COMPUTECAPACITY_NOT_SET(0); + private final int value; + + private ComputeCapacityCase(int value) { + this.value = value; + } + /** + * @param value The 
number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ComputeCapacityCase valueOf(int value) { + return forNumber(value); + } + + public static ComputeCapacityCase forNumber(int value) { + switch (value) { + case 2: + return NODE_COUNT; + case 3: + return PROCESSING_UNITS; + case 0: + return COMPUTECAPACITY_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ComputeCapacityCase getComputeCapacityCase() { + return ComputeCapacityCase.forNumber(computeCapacityCase_); + } + + public static final int REPLICA_SELECTION_FIELD_NUMBER = 1; + private com.google.spanner.admin.instance.v1.ReplicaSelection replicaSelection_; + /** + * + * + *
    +   * Required. Identifies replicas by specified properties.
    +   * All replicas in the selection have the same amount of compute capacity.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the replicaSelection field is set. + */ + @java.lang.Override + public boolean hasReplicaSelection() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
    +   * Required. Identifies replicas by specified properties.
    +   * All replicas in the selection have the same amount of compute capacity.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The replicaSelection. + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelection getReplicaSelection() { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } + /** + * + * + *
    +   * Required. Identifies replicas by specified properties.
    +   * All replicas in the selection have the same amount of compute capacity.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder + getReplicaSelectionOrBuilder() { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } + + public static final int NODE_COUNT_FIELD_NUMBER = 2; + /** + * + * + *
    +   * The number of nodes allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 node_count = 2; + * + * @return Whether the nodeCount field is set. + */ + @java.lang.Override + public boolean hasNodeCount() { + return computeCapacityCase_ == 2; + } + /** + * + * + *
    +   * The number of nodes allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 node_count = 2; + * + * @return The nodeCount. + */ + @java.lang.Override + public int getNodeCount() { + if (computeCapacityCase_ == 2) { + return (java.lang.Integer) computeCapacity_; + } + return 0; + } + + public static final int PROCESSING_UNITS_FIELD_NUMBER = 3; + /** + * + * + *
    +   * The number of processing units allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 processing_units = 3; + * + * @return Whether the processingUnits field is set. + */ + @java.lang.Override + public boolean hasProcessingUnits() { + return computeCapacityCase_ == 3; + } + /** + * + * + *
    +   * The number of processing units allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 processing_units = 3; + * + * @return The processingUnits. + */ + @java.lang.Override + public int getProcessingUnits() { + if (computeCapacityCase_ == 3) { + return (java.lang.Integer) computeCapacity_; + } + return 0; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getReplicaSelection()); + } + if (computeCapacityCase_ == 2) { + output.writeInt32(2, (int) ((java.lang.Integer) computeCapacity_)); + } + if (computeCapacityCase_ == 3) { + output.writeInt32(3, (int) ((java.lang.Integer) computeCapacity_)); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getReplicaSelection()); + } + if (computeCapacityCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 2, (int) ((java.lang.Integer) computeCapacity_)); + } + if (computeCapacityCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeInt32Size( + 3, (int) ((java.lang.Integer) computeCapacity_)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.ReplicaComputeCapacity)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity other = + 
(com.google.spanner.admin.instance.v1.ReplicaComputeCapacity) obj; + + if (hasReplicaSelection() != other.hasReplicaSelection()) return false; + if (hasReplicaSelection()) { + if (!getReplicaSelection().equals(other.getReplicaSelection())) return false; + } + if (!getComputeCapacityCase().equals(other.getComputeCapacityCase())) return false; + switch (computeCapacityCase_) { + case 2: + if (getNodeCount() != other.getNodeCount()) return false; + break; + case 3: + if (getProcessingUnits() != other.getProcessingUnits()) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasReplicaSelection()) { + hash = (37 * hash) + REPLICA_SELECTION_FIELD_NUMBER; + hash = (53 * hash) + getReplicaSelection().hashCode(); + } + switch (computeCapacityCase_) { + case 2: + hash = (37 * hash) + NODE_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getNodeCount(); + break; + case 3: + hash = (37 * hash) + PROCESSING_UNITS_FIELD_NUMBER; + hash = (53 * hash) + getProcessingUnits(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + 
com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * ReplicaComputeCapacity describes the amount of server resources that are
    +   * allocated to each replica identified by the replica selection.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ReplicaComputeCapacity} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.ReplicaComputeCapacity) + com.google.spanner.admin.instance.v1.ReplicaComputeCapacityOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.class, + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getReplicaSelectionFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + replicaSelection_ = null; + if (replicaSelectionBuilder_ != null) { + replicaSelectionBuilder_.dispose(); + replicaSelectionBuilder_ = null; + } + computeCapacityCase_ = 0; + computeCapacity_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return 
com.google.spanner.admin.instance.v1.SpannerInstanceAdminProto + .internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity build() { + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity buildPartial() { + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity result = + new com.google.spanner.admin.instance.v1.ReplicaComputeCapacity(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.ReplicaComputeCapacity result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.replicaSelection_ = + replicaSelectionBuilder_ == null ? 
replicaSelection_ : replicaSelectionBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + private void buildPartialOneofs( + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity result) { + result.computeCapacityCase_ = computeCapacityCase_; + result.computeCapacity_ = this.computeCapacity_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.ReplicaComputeCapacity) { + return mergeFrom((com.google.spanner.admin.instance.v1.ReplicaComputeCapacity) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.admin.instance.v1.ReplicaComputeCapacity other) { + if (other == com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.getDefaultInstance()) + return this; + if (other.hasReplicaSelection()) { + mergeReplicaSelection(other.getReplicaSelection()); + } + switch (other.getComputeCapacityCase()) { + case NODE_COUNT: + { + 
setNodeCount(other.getNodeCount()); + break; + } + case PROCESSING_UNITS: + { + setProcessingUnits(other.getProcessingUnits()); + break; + } + case COMPUTECAPACITY_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + getReplicaSelectionFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + computeCapacity_ = input.readInt32(); + computeCapacityCase_ = 2; + break; + } // case 16 + case 24: + { + computeCapacity_ = input.readInt32(); + computeCapacityCase_ = 3; + break; + } // case 24 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int computeCapacityCase_ = 0; + private java.lang.Object computeCapacity_; + + public ComputeCapacityCase getComputeCapacityCase() { + return ComputeCapacityCase.forNumber(computeCapacityCase_); + } + + public Builder clearComputeCapacity() { + computeCapacityCase_ = 0; + computeCapacity_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.spanner.admin.instance.v1.ReplicaSelection replicaSelection_; + private 
com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.instance.v1.ReplicaSelection, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder, + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder> + replicaSelectionBuilder_; + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the replicaSelection field is set. + */ + public boolean hasReplicaSelection() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The replicaSelection. + */ + public com.google.spanner.admin.instance.v1.ReplicaSelection getReplicaSelection() { + if (replicaSelectionBuilder_ == null) { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } else { + return replicaSelectionBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReplicaSelection( + com.google.spanner.admin.instance.v1.ReplicaSelection value) { + if (replicaSelectionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + replicaSelection_ = value; + } else { + replicaSelectionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReplicaSelection( + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder builderForValue) { + if (replicaSelectionBuilder_ == null) { + replicaSelection_ = builderForValue.build(); + } else { + replicaSelectionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeReplicaSelection( + com.google.spanner.admin.instance.v1.ReplicaSelection value) { + if (replicaSelectionBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && replicaSelection_ != null + && replicaSelection_ + != com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance()) { + getReplicaSelectionBuilder().mergeFrom(value); + } else { + replicaSelection_ = value; + } + } else { + replicaSelectionBuilder_.mergeFrom(value); + } + if (replicaSelection_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearReplicaSelection() { + bitField0_ = (bitField0_ & ~0x00000001); + replicaSelection_ = null; + if (replicaSelectionBuilder_ != null) { + replicaSelectionBuilder_.dispose(); + replicaSelectionBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaSelection.Builder + getReplicaSelectionBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getReplicaSelectionFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder + getReplicaSelectionOrBuilder() { + if (replicaSelectionBuilder_ != null) { + return replicaSelectionBuilder_.getMessageOrBuilder(); + } else { + return replicaSelection_ == null + ? com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance() + : replicaSelection_; + } + } + /** + * + * + *
    +     * Required. Identifies replicas by specified properties.
    +     * All replicas in the selection have the same amount of compute capacity.
    +     * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.instance.v1.ReplicaSelection, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder, + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder> + getReplicaSelectionFieldBuilder() { + if (replicaSelectionBuilder_ == null) { + replicaSelectionBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.admin.instance.v1.ReplicaSelection, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder, + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder>( + getReplicaSelection(), getParentForChildren(), isClean()); + replicaSelection_ = null; + } + return replicaSelectionBuilder_; + } + + /** + * + * + *
    +     * The number of nodes allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 node_count = 2; + * + * @return Whether the nodeCount field is set. + */ + public boolean hasNodeCount() { + return computeCapacityCase_ == 2; + } + /** + * + * + *
    +     * The number of nodes allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 node_count = 2; + * + * @return The nodeCount. + */ + public int getNodeCount() { + if (computeCapacityCase_ == 2) { + return (java.lang.Integer) computeCapacity_; + } + return 0; + } + /** + * + * + *
    +     * The number of nodes allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 node_count = 2; + * + * @param value The nodeCount to set. + * @return This builder for chaining. + */ + public Builder setNodeCount(int value) { + + computeCapacityCase_ = 2; + computeCapacity_ = value; + onChanged(); + return this; + } + /** + * + * + *
    +     * The number of nodes allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 node_count = 2; + * + * @return This builder for chaining. + */ + public Builder clearNodeCount() { + if (computeCapacityCase_ == 2) { + computeCapacityCase_ = 0; + computeCapacity_ = null; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * The number of processing units allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 processing_units = 3; + * + * @return Whether the processingUnits field is set. + */ + public boolean hasProcessingUnits() { + return computeCapacityCase_ == 3; + } + /** + * + * + *
    +     * The number of processing units allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 processing_units = 3; + * + * @return The processingUnits. + */ + public int getProcessingUnits() { + if (computeCapacityCase_ == 3) { + return (java.lang.Integer) computeCapacity_; + } + return 0; + } + /** + * + * + *
    +     * The number of processing units allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 processing_units = 3; + * + * @param value The processingUnits to set. + * @return This builder for chaining. + */ + public Builder setProcessingUnits(int value) { + + computeCapacityCase_ = 3; + computeCapacity_ = value; + onChanged(); + return this; + } + /** + * + * + *
    +     * The number of processing units allocated to each replica.
    +     *
    +     * This may be zero in API responses for instances that are not yet in
    +     * state `READY`.
    +     * 
    + * + * int32 processing_units = 3; + * + * @return This builder for chaining. + */ + public Builder clearProcessingUnits() { + if (computeCapacityCase_ == 3) { + computeCapacityCase_ = 0; + computeCapacity_ = null; + onChanged(); + } + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.ReplicaComputeCapacity) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ReplicaComputeCapacity) + private static final com.google.spanner.admin.instance.v1.ReplicaComputeCapacity DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.ReplicaComputeCapacity(); + } + + public static com.google.spanner.admin.instance.v1.ReplicaComputeCapacity getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReplicaComputeCapacity parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + 
.setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaComputeCapacity getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaComputeCapacityOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaComputeCapacityOrBuilder.java new file mode 100644 index 00000000000..a8eeea2cdcb --- /dev/null +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaComputeCapacityOrBuilder.java @@ -0,0 +1,135 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.instance.v1; + +public interface ReplicaComputeCapacityOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.ReplicaComputeCapacity) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. Identifies replicas by specified properties.
    +   * All replicas in the selection have the same amount of compute capacity.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the replicaSelection field is set. + */ + boolean hasReplicaSelection(); + /** + * + * + *
    +   * Required. Identifies replicas by specified properties.
    +   * All replicas in the selection have the same amount of compute capacity.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The replicaSelection. + */ + com.google.spanner.admin.instance.v1.ReplicaSelection getReplicaSelection(); + /** + * + * + *
    +   * Required. Identifies replicas by specified properties.
    +   * All replicas in the selection have the same amount of compute capacity.
    +   * 
    + * + * + * .google.spanner.admin.instance.v1.ReplicaSelection replica_selection = 1 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder getReplicaSelectionOrBuilder(); + + /** + * + * + *
    +   * The number of nodes allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 node_count = 2; + * + * @return Whether the nodeCount field is set. + */ + boolean hasNodeCount(); + /** + * + * + *
    +   * The number of nodes allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 node_count = 2; + * + * @return The nodeCount. + */ + int getNodeCount(); + + /** + * + * + *
    +   * The number of processing units allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 processing_units = 3; + * + * @return Whether the processingUnits field is set. + */ + boolean hasProcessingUnits(); + /** + * + * + *
    +   * The number of processing units allocated to each replica.
    +   *
    +   * This may be zero in API responses for instances that are not yet in
    +   * state `READY`.
    +   * 
    + * + * int32 processing_units = 3; + * + * @return The processingUnits. + */ + int getProcessingUnits(); + + com.google.spanner.admin.instance.v1.ReplicaComputeCapacity.ComputeCapacityCase + getComputeCapacityCase(); +} diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaInfo.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaInfo.java index de3200c43d1..1c633dded9a 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaInfo.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaInfo.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** Protobuf type {@code google.spanner.admin.instance.v1.ReplicaInfo} */ diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaInfoOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaInfoOrBuilder.java index ebcefeef9b9..ec4219e4c17 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaInfoOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaInfoOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface ReplicaInfoOrBuilder diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaSelection.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaSelection.java new file mode 100644 index 00000000000..4ad522000a1 --- /dev/null +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaSelection.java @@ -0,0 +1,626 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/instance/v1/common.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.instance.v1; + +/** + * + * + *
    + * ReplicaSelection identifies replicas with common properties.
    + * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ReplicaSelection} + */ +public final class ReplicaSelection extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.instance.v1.ReplicaSelection) + ReplicaSelectionOrBuilder { + private static final long serialVersionUID = 0L; + // Use ReplicaSelection.newBuilder() to construct. + private ReplicaSelection(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReplicaSelection() { + location_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReplicaSelection(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.CommonProto + .internal_static_google_spanner_admin_instance_v1_ReplicaSelection_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.CommonProto + .internal_static_google_spanner_admin_instance_v1_ReplicaSelection_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ReplicaSelection.class, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder.class); + } + + public static final int LOCATION_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object location_ = ""; + /** + * + * + *
    +   * Required. Name of the location of the replicas (e.g., "us-central1").
    +   * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The location. + */ + @java.lang.Override + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } + } + /** + * + * + *
    +   * Required. Name of the location of the replicas (e.g., "us-central1").
    +   * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for location. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(location_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, location_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(location_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, location_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.admin.instance.v1.ReplicaSelection)) { + return super.equals(obj); + } + com.google.spanner.admin.instance.v1.ReplicaSelection other = + (com.google.spanner.admin.instance.v1.ReplicaSelection) obj; + + if (!getLocation().equals(other.getLocation())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + 
@java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + LOCATION_FIELD_NUMBER; + hash = (53 * hash) + getLocation().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + java.io.InputStream input) throws 
java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.instance.v1.ReplicaSelection prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return 
this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * ReplicaSelection identifies replicas with common properties.
    +   * 
    + * + * Protobuf type {@code google.spanner.admin.instance.v1.ReplicaSelection} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.instance.v1.ReplicaSelection) + com.google.spanner.admin.instance.v1.ReplicaSelectionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.instance.v1.CommonProto + .internal_static_google_spanner_admin_instance_v1_ReplicaSelection_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.instance.v1.CommonProto + .internal_static_google_spanner_admin_instance_v1_ReplicaSelection_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.instance.v1.ReplicaSelection.class, + com.google.spanner.admin.instance.v1.ReplicaSelection.Builder.class); + } + + // Construct using com.google.spanner.admin.instance.v1.ReplicaSelection.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + location_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.instance.v1.CommonProto + .internal_static_google_spanner_admin_instance_v1_ReplicaSelection_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelection getDefaultInstanceForType() { + return com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelection build() { + com.google.spanner.admin.instance.v1.ReplicaSelection result = 
buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelection buildPartial() { + com.google.spanner.admin.instance.v1.ReplicaSelection result = + new com.google.spanner.admin.instance.v1.ReplicaSelection(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.admin.instance.v1.ReplicaSelection result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.location_ = location_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.admin.instance.v1.ReplicaSelection) { + return mergeFrom((com.google.spanner.admin.instance.v1.ReplicaSelection) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(com.google.spanner.admin.instance.v1.ReplicaSelection other) { + if (other == com.google.spanner.admin.instance.v1.ReplicaSelection.getDefaultInstance()) + return this; + if (!other.getLocation().isEmpty()) { + location_ = other.location_; + bitField0_ |= 0x00000001; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + location_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object location_ = ""; + /** + * + * + *
    +     * Required. Name of the location of the replicas (e.g., "us-central1").
    +     * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The location. + */ + public java.lang.String getLocation() { + java.lang.Object ref = location_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + location_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * Required. Name of the location of the replicas (e.g., "us-central1").
    +     * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for location. + */ + public com.google.protobuf.ByteString getLocationBytes() { + java.lang.Object ref = location_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + location_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * Required. Name of the location of the replicas (e.g., "us-central1").
    +     * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The location to set. + * @return This builder for chaining. + */ + public Builder setLocation(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + location_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. Name of the location of the replicas (e.g., "us-central1").
    +     * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearLocation() { + location_ = getDefaultInstance().getLocation(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
    +     * Required. Name of the location of the replicas (e.g., "us-central1").
    +     * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for location to set. + * @return This builder for chaining. + */ + public Builder setLocationBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + location_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.instance.v1.ReplicaSelection) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ReplicaSelection) + private static final com.google.spanner.admin.instance.v1.ReplicaSelection DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.admin.instance.v1.ReplicaSelection(); + } + + public static com.google.spanner.admin.instance.v1.ReplicaSelection getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReplicaSelection parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw 
e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.instance.v1.ReplicaSelection getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaSelectionOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaSelectionOrBuilder.java new file mode 100644 index 00000000000..60c9b0edca0 --- /dev/null +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/ReplicaSelectionOrBuilder.java @@ -0,0 +1,51 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/spanner/admin/instance/v1/common.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.admin.instance.v1; + +public interface ReplicaSelectionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.instance.v1.ReplicaSelection) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Required. Name of the location of the replicas (e.g., "us-central1").
    +   * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The location. + */ + java.lang.String getLocation(); + /** + * + * + *
    +   * Required. Name of the location of the replicas (e.g., "us-central1").
    +   * 
    + * + * string location = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for location. + */ + com.google.protobuf.ByteString getLocationBytes(); +} diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/SpannerInstanceAdminProto.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/SpannerInstanceAdminProto.java index 8b86bec104c..2edfb759d12 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/SpannerInstanceAdminProto.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/SpannerInstanceAdminProto.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public final class SpannerInstanceAdminProto { @@ -40,6 +40,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_spanner_admin_instance_v1_InstanceConfig_LabelsEntry_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_spanner_admin_instance_v1_InstanceConfig_LabelsEntry_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -52,6 +56,14 @@ public static void 
registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingTargets_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingTargets_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_spanner_admin_instance_v1_Instance_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -176,6 +188,18 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsResponse_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_fieldAccessorTable; + static 
final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; @@ -222,308 +246,348 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\022\025\n\021STATE_UNSPECIFIED\020\000\022\014\n\010CREATING\020\001\022\t\n" + "\005READY\020\002:`\352A]\n%spanner.googleapis.com/In" + "stanceConfig\0224projects/{project}/instanc" - + "eConfigs/{instance_config}\"\363\003\n\021Autoscali" - + "ngConfig\022f\n\022autoscaling_limits\030\001 \001(\0132E.g" - + "oogle.spanner.admin.instance.v1.Autoscal" - + "ingConfig.AutoscalingLimitsB\003\340A\002\022h\n\023auto" - + "scaling_targets\030\002 \001(\0132F.google.spanner.a" - + "dmin.instance.v1.AutoscalingConfig.Autos" - + "calingTargetsB\003\340A\002\032\227\001\n\021AutoscalingLimits" - + "\022\023\n\tmin_nodes\030\001 \001(\005H\000\022\036\n\024min_processing_" - + "units\030\002 \001(\005H\000\022\023\n\tmax_nodes\030\003 \001(\005H\001\022\036\n\024ma" - + "x_processing_units\030\004 \001(\005H\001B\013\n\tmin_limitB" - + "\013\n\tmax_limit\032r\n\022AutoscalingTargets\0222\n%hi" - + "gh_priority_cpu_utilization_percent\030\001 \001(" - + "\005B\003\340A\002\022(\n\033storage_utilization_percent\030\002 " - + "\001(\005B\003\340A\002\"\303\005\n\010Instance\022\021\n\004name\030\001 \001(\tB\003\340A\002" - + "\022=\n\006config\030\002 
\001(\tB-\340A\002\372A\'\n%spanner.google" - + "apis.com/InstanceConfig\022\031\n\014display_name\030" - + "\003 \001(\tB\003\340A\002\022\022\n\nnode_count\030\005 \001(\005\022\030\n\020proces" - + "sing_units\030\t \001(\005\022T\n\022autoscaling_config\030\021" - + " \001(\01323.google.spanner.admin.instance.v1." - + "AutoscalingConfigB\003\340A\001\022D\n\005state\030\006 \001(\01620." - + "google.spanner.admin.instance.v1.Instanc" - + "e.StateB\003\340A\003\022F\n\006labels\030\007 \003(\01326.google.sp" - + "anner.admin.instance.v1.Instance.LabelsE" - + "ntry\022\025\n\rendpoint_uris\030\010 \003(\t\0224\n\013create_ti" - + "me\030\013 \001(\0132\032.google.protobuf.TimestampB\003\340A" - + "\003\0224\n\013update_time\030\014 \001(\0132\032.google.protobuf" - + ".TimestampB\003\340A\003\032-\n\013LabelsEntry\022\013\n\003key\030\001 " - + "\001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"7\n\005State\022\025\n\021STATE" - + "_UNSPECIFIED\020\000\022\014\n\010CREATING\020\001\022\t\n\005READY\020\002:" - + "M\352AJ\n\037spanner.googleapis.com/Instance\022\'p" - + "rojects/{project}/instances/{instance}\"\210" - + "\001\n\032ListInstanceConfigsRequest\022C\n\006parent\030" - + "\001 \001(\tB3\340A\002\372A-\n+cloudresourcemanager.goog" - + "leapis.com/Project\022\021\n\tpage_size\030\002 \001(\005\022\022\n" - + "\npage_token\030\003 \001(\t\"\202\001\n\033ListInstanceConfig" - + "sResponse\022J\n\020instance_configs\030\001 \003(\01320.go" - + "ogle.spanner.admin.instance.v1.InstanceC" - + "onfig\022\027\n\017next_page_token\030\002 \001(\t\"W\n\030GetIns" - + "tanceConfigRequest\022;\n\004name\030\001 \001(\tB-\340A\002\372A\'" - + "\n%spanner.googleapis.com/InstanceConfig\"" - + "\352\001\n\033CreateInstanceConfigRequest\022C\n\006paren" - + "t\030\001 \001(\tB3\340A\002\372A-\n+cloudresourcemanager.go" - + "ogleapis.com/Project\022\037\n\022instance_config_" - + "id\030\002 \001(\tB\003\340A\002\022N\n\017instance_config\030\003 \001(\01320" + + 
"eConfigs/{instance_config}\"\262\001\n\026ReplicaCo" + + "mputeCapacity\022R\n\021replica_selection\030\001 \001(\013" + + "22.google.spanner.admin.instance.v1.Repl" + + "icaSelectionB\003\340A\002\022\024\n\nnode_count\030\002 \001(\005H\000\022" + + "\032\n\020processing_units\030\003 \001(\005H\000B\022\n\020compute_c" + + "apacity\"\270\010\n\021AutoscalingConfig\022f\n\022autosca" + + "ling_limits\030\001 \001(\0132E.google.spanner.admin" + + ".instance.v1.AutoscalingConfig.Autoscali" + + "ngLimitsB\003\340A\002\022h\n\023autoscaling_targets\030\002 \001" + + "(\0132F.google.spanner.admin.instance.v1.Au" + + "toscalingConfig.AutoscalingTargetsB\003\340A\002\022" + + "|\n\036asymmetric_autoscaling_options\030\003 \003(\0132" + + "O.google.spanner.admin.instance.v1.Autos" + + "calingConfig.AsymmetricAutoscalingOption" + + "B\003\340A\001\032\227\001\n\021AutoscalingLimits\022\023\n\tmin_nodes" + + "\030\001 \001(\005H\000\022\036\n\024min_processing_units\030\002 \001(\005H\000" + + "\022\023\n\tmax_nodes\030\003 \001(\005H\001\022\036\n\024max_processing_" + + "units\030\004 \001(\005H\001B\013\n\tmin_limitB\013\n\tmax_limit\032" + + "r\n\022AutoscalingTargets\0222\n%high_priority_c" + + "pu_utilization_percent\030\001 \001(\005B\003\340A\002\022(\n\033sto" + + "rage_utilization_percent\030\002 \001(\005B\003\340A\002\032\304\003\n\033" + + "AsymmetricAutoscalingOption\022R\n\021replica_s" + + "election\030\001 \001(\01322.google.spanner.admin.in" + + "stance.v1.ReplicaSelectionB\003\340A\002\022\202\001\n\tover" + + "rides\030\002 \001(\0132j.google.spanner.admin.insta" + + "nce.v1.AutoscalingConfig.AsymmetricAutos" + + "calingOption.AutoscalingConfigOverridesB" + + "\003\340A\001\032\313\001\n\032AutoscalingConfigOverrides\022f\n\022a" + + "utoscaling_limits\030\001 \001(\0132E.google.spanner" + + ".admin.instance.v1.AutoscalingConfig.Aut" + + "oscalingLimitsB\003\340A\001\022E\n8autoscaling_targe" + + "t_high_priority_cpu_utilization_percent\030" + + "\002 
\001(\005B\003\340A\001\"\305\007\n\010Instance\022\021\n\004name\030\001 \001(\tB\003\340" + + "A\002\022=\n\006config\030\002 \001(\tB-\340A\002\372A\'\n%spanner.goog" + + "leapis.com/InstanceConfig\022\031\n\014display_nam" + + "e\030\003 \001(\tB\003\340A\002\022\022\n\nnode_count\030\005 \001(\005\022\030\n\020proc" + + "essing_units\030\t \001(\005\022_\n\030replica_compute_ca" + + "pacity\030\023 \003(\01328.google.spanner.admin.inst" + + "ance.v1.ReplicaComputeCapacityB\003\340A\003\022T\n\022a" + + "utoscaling_config\030\021 \001(\01323.google.spanner" + + ".admin.instance.v1.AutoscalingConfigB\003\340A" + + "\001\022D\n\005state\030\006 \001(\01620.google.spanner.admin." + + "instance.v1.Instance.StateB\003\340A\003\022F\n\006label" + + "s\030\007 \003(\01326.google.spanner.admin.instance." + + "v1.Instance.LabelsEntry\022\025\n\rendpoint_uris" + + "\030\010 \003(\t\0224\n\013create_time\030\013 \001(\0132\032.google.pro" + + "tobuf.TimestampB\003\340A\003\0224\n\013update_time\030\014 \001(" + + "\0132\032.google.protobuf.TimestampB\003\340A\003\022H\n\007ed" + + "ition\030\024 \001(\01622.google.spanner.admin.insta" + + "nce.v1.Instance.EditionB\003\340A\001\032-\n\013LabelsEn" + + "try\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\"7\n\005S" + + "tate\022\025\n\021STATE_UNSPECIFIED\020\000\022\014\n\010CREATING\020" + + "\001\022\t\n\005READY\020\002\"U\n\007Edition\022\027\n\023EDITION_UNSPE" + + "CIFIED\020\000\022\014\n\010STANDARD\020\001\022\016\n\nENTERPRISE\020\002\022\023" + + "\n\017ENTERPRISE_PLUS\020\003:M\352AJ\n\037spanner.google" + + "apis.com/Instance\022\'projects/{project}/in" + + "stances/{instance}\"\210\001\n\032ListInstanceConfi" + + "gsRequest\022C\n\006parent\030\001 \001(\tB3\340A\002\372A-\n+cloud" + + "resourcemanager.googleapis.com/Project\022\021" + + "\n\tpage_size\030\002 \001(\005\022\022\n\npage_token\030\003 \001(\t\"\202\001" + + "\n\033ListInstanceConfigsResponse\022J\n\020instanc" + + 
"e_configs\030\001 \003(\01320.google.spanner.admin.i" + + "nstance.v1.InstanceConfig\022\027\n\017next_page_t" + + "oken\030\002 \001(\t\"W\n\030GetInstanceConfigRequest\022;" + + "\n\004name\030\001 \001(\tB-\340A\002\372A\'\n%spanner.googleapis" + + ".com/InstanceConfig\"\352\001\n\033CreateInstanceCo" + + "nfigRequest\022C\n\006parent\030\001 \001(\tB3\340A\002\372A-\n+clo" + + "udresourcemanager.googleapis.com/Project" + + "\022\037\n\022instance_config_id\030\002 \001(\tB\003\340A\002\022N\n\017ins" + + "tance_config\030\003 \001(\01320.google.spanner.admi" + + "n.instance.v1.InstanceConfigB\003\340A\002\022\025\n\rval" + + "idate_only\030\004 \001(\010\"\272\001\n\033UpdateInstanceConfi" + + "gRequest\022N\n\017instance_config\030\001 \001(\01320.goog" + + "le.spanner.admin.instance.v1.InstanceCon" + + "figB\003\340A\002\0224\n\013update_mask\030\002 \001(\0132\032.google.p" + + "rotobuf.FieldMaskB\003\340A\002\022\025\n\rvalidate_only\030" + + "\003 \001(\010\"\177\n\033DeleteInstanceConfigRequest\022;\n\004" + + "name\030\001 \001(\tB-\340A\002\372A\'\n%spanner.googleapis.c" + + "om/InstanceConfig\022\014\n\004etag\030\002 \001(\t\022\025\n\rvalid" + + "ate_only\030\003 \001(\010\"\241\001\n#ListInstanceConfigOpe" + + "rationsRequest\022C\n\006parent\030\001 \001(\tB3\340A\002\372A-\n+" + + "cloudresourcemanager.googleapis.com/Proj" + + "ect\022\016\n\006filter\030\002 \001(\t\022\021\n\tpage_size\030\003 \001(\005\022\022" + + "\n\npage_token\030\004 \001(\t\"r\n$ListInstanceConfig" + + "OperationsResponse\0221\n\noperations\030\001 \003(\0132\035" + + ".google.longrunning.Operation\022\027\n\017next_pa" + + "ge_token\030\002 \001(\t\"{\n\022GetInstanceRequest\0225\n\004" + + "name\030\001 \001(\tB\'\340A\002\372A!\n\037spanner.googleapis.c" + + "om/Instance\022.\n\nfield_mask\030\002 \001(\0132\032.google" + + ".protobuf.FieldMask\"\271\001\n\025CreateInstanceRe" + + "quest\022C\n\006parent\030\001 \001(\tB3\340A\002\372A-\n+cloudreso" + + 
"urcemanager.googleapis.com/Project\022\030\n\013in" + + "stance_id\030\002 \001(\tB\003\340A\002\022A\n\010instance\030\003 \001(\0132*" + ".google.spanner.admin.instance.v1.Instan" - + "ceConfigB\003\340A\002\022\025\n\rvalidate_only\030\004 \001(\010\"\272\001\n" - + "\033UpdateInstanceConfigRequest\022N\n\017instance" - + "_config\030\001 \001(\01320.google.spanner.admin.ins" - + "tance.v1.InstanceConfigB\003\340A\002\0224\n\013update_m" - + "ask\030\002 \001(\0132\032.google.protobuf.FieldMaskB\003\340" - + "A\002\022\025\n\rvalidate_only\030\003 \001(\010\"\177\n\033DeleteInsta" - + "nceConfigRequest\022;\n\004name\030\001 \001(\tB-\340A\002\372A\'\n%" - + "spanner.googleapis.com/InstanceConfig\022\014\n" - + "\004etag\030\002 \001(\t\022\025\n\rvalidate_only\030\003 \001(\010\"\241\001\n#L" - + "istInstanceConfigOperationsRequest\022C\n\006pa" - + "rent\030\001 \001(\tB3\340A\002\372A-\n+cloudresourcemanager" - + ".googleapis.com/Project\022\016\n\006filter\030\002 \001(\t\022" - + "\021\n\tpage_size\030\003 \001(\005\022\022\n\npage_token\030\004 \001(\t\"r" - + "\n$ListInstanceConfigOperationsResponse\0221" - + "\n\noperations\030\001 \003(\0132\035.google.longrunning." 
- + "Operation\022\027\n\017next_page_token\030\002 \001(\t\"{\n\022Ge" - + "tInstanceRequest\0225\n\004name\030\001 \001(\tB\'\340A\002\372A!\n\037" - + "spanner.googleapis.com/Instance\022.\n\nfield" - + "_mask\030\002 \001(\0132\032.google.protobuf.FieldMask\"" - + "\271\001\n\025CreateInstanceRequest\022C\n\006parent\030\001 \001(" - + "\tB3\340A\002\372A-\n+cloudresourcemanager.googleap" - + "is.com/Project\022\030\n\013instance_id\030\002 \001(\tB\003\340A\002" - + "\022A\n\010instance\030\003 \001(\0132*.google.spanner.admi" - + "n.instance.v1.InstanceB\003\340A\002\"\311\001\n\024ListInst" - + "ancesRequest\022C\n\006parent\030\001 \001(\tB3\340A\002\372A-\n+cl" - + "oudresourcemanager.googleapis.com/Projec" - + "t\022\021\n\tpage_size\030\002 \001(\005\022\022\n\npage_token\030\003 \001(\t" - + "\022\016\n\006filter\030\004 \001(\t\0225\n\021instance_deadline\030\005 " - + "\001(\0132\032.google.protobuf.Timestamp\"\204\001\n\025List" - + "InstancesResponse\022=\n\tinstances\030\001 \003(\0132*.g" - + "oogle.spanner.admin.instance.v1.Instance" - + "\022\027\n\017next_page_token\030\002 \001(\t\022\023\n\013unreachable" - + "\030\003 \003(\t\"\217\001\n\025UpdateInstanceRequest\022A\n\010inst" - + "ance\030\001 \001(\0132*.google.spanner.admin.instan" - + "ce.v1.InstanceB\003\340A\002\0223\n\nfield_mask\030\002 \001(\0132" - + "\032.google.protobuf.FieldMaskB\003\340A\002\"N\n\025Dele" - + "teInstanceRequest\0225\n\004name\030\001 \001(\tB\'\340A\002\372A!\n" - + "\037spanner.googleapis.com/Instance\"\277\002\n\026Cre" - + "ateInstanceMetadata\022<\n\010instance\030\001 \001(\0132*." - + "google.spanner.admin.instance.v1.Instanc" - + "e\022.\n\nstart_time\030\002 \001(\0132\032.google.protobuf." - + "Timestamp\022/\n\013cancel_time\030\003 \001(\0132\032.google." 
- + "protobuf.Timestamp\022,\n\010end_time\030\004 \001(\0132\032.g" - + "oogle.protobuf.Timestamp\022X\n\033expected_ful" - + "fillment_period\030\005 \001(\01623.google.spanner.a" - + "dmin.instance.v1.FulfillmentPeriod\"\277\002\n\026U" - + "pdateInstanceMetadata\022<\n\010instance\030\001 \001(\0132" - + "*.google.spanner.admin.instance.v1.Insta" - + "nce\022.\n\nstart_time\030\002 \001(\0132\032.google.protobu" - + "f.Timestamp\022/\n\013cancel_time\030\003 \001(\0132\032.googl" - + "e.protobuf.Timestamp\022,\n\010end_time\030\004 \001(\0132\032" - + ".google.protobuf.Timestamp\022X\n\033expected_f" - + "ulfillment_period\030\005 \001(\01623.google.spanner" - + ".admin.instance.v1.FulfillmentPeriod\"\341\001\n" - + "\034CreateInstanceConfigMetadata\022I\n\017instanc" - + "e_config\030\001 \001(\01320.google.spanner.admin.in" - + "stance.v1.InstanceConfig\022E\n\010progress\030\002 \001" - + "(\01323.google.spanner.admin.instance.v1.Op" - + "erationProgress\022/\n\013cancel_time\030\003 \001(\0132\032.g" - + "oogle.protobuf.Timestamp\"\341\001\n\034UpdateInsta" - + "nceConfigMetadata\022I\n\017instance_config\030\001 \001" - + "(\01320.google.spanner.admin.instance.v1.In" - + "stanceConfig\022E\n\010progress\030\002 \001(\01323.google." 
- + "spanner.admin.instance.v1.OperationProgr" - + "ess\022/\n\013cancel_time\030\003 \001(\0132\032.google.protob" - + "uf.Timestamp\"\216\005\n\021InstancePartition\022\021\n\004na" - + "me\030\001 \001(\tB\003\340A\002\022=\n\006config\030\002 \001(\tB-\340A\002\372A\'\n%s" - + "panner.googleapis.com/InstanceConfig\022\031\n\014" - + "display_name\030\003 \001(\tB\003\340A\002\022\024\n\nnode_count\030\005 " - + "\001(\005H\000\022\032\n\020processing_units\030\006 \001(\005H\000\022M\n\005sta" - + "te\030\007 \001(\01629.google.spanner.admin.instance" - + ".v1.InstancePartition.StateB\003\340A\003\0224\n\013crea" - + "te_time\030\010 \001(\0132\032.google.protobuf.Timestam" - + "pB\003\340A\003\0224\n\013update_time\030\t \001(\0132\032.google.pro" - + "tobuf.TimestampB\003\340A\003\022\"\n\025referencing_data" - + "bases\030\n \003(\tB\003\340A\003\022 \n\023referencing_backups\030" - + "\013 \003(\tB\003\340A\003\022\014\n\004etag\030\014 \001(\t\"7\n\005State\022\025\n\021STA" - + "TE_UNSPECIFIED\020\000\022\014\n\010CREATING\020\001\022\t\n\005READY\020" - + "\002:~\352A{\n(spanner.googleapis.com/InstanceP" - + "artition\022Oprojects/{project}/instances/{" - + "instance}/instancePartitions/{instance_p" - + "artition}B\022\n\020compute_capacity\"\201\002\n\037Create" - + "InstancePartitionMetadata\022O\n\022instance_pa" - + "rtition\030\001 \001(\01323.google.spanner.admin.ins" - + "tance.v1.InstancePartition\022.\n\nstart_time" - + "\030\002 \001(\0132\032.google.protobuf.Timestamp\022/\n\013ca" - + "ncel_time\030\003 \001(\0132\032.google.protobuf.Timest" - + "amp\022,\n\010end_time\030\004 \001(\0132\032.google.protobuf." 
- + "Timestamp\"\323\001\n\036CreateInstancePartitionReq" - + "uest\0227\n\006parent\030\001 \001(\tB\'\340A\002\372A!\n\037spanner.go" - + "ogleapis.com/Instance\022\"\n\025instance_partit" - + "ion_id\030\002 \001(\tB\003\340A\002\022T\n\022instance_partition\030" - + "\003 \001(\01323.google.spanner.admin.instance.v1" - + ".InstancePartitionB\003\340A\002\"n\n\036DeleteInstanc" - + "ePartitionRequest\022>\n\004name\030\001 \001(\tB0\340A\002\372A*\n" - + "(spanner.googleapis.com/InstancePartitio" - + "n\022\014\n\004etag\030\002 \001(\t\"]\n\033GetInstancePartitionR" - + "equest\022>\n\004name\030\001 \001(\tB0\340A\002\372A*\n(spanner.go" - + "ogleapis.com/InstancePartition\"\253\001\n\036Updat" - + "eInstancePartitionRequest\022T\n\022instance_pa" - + "rtition\030\001 \001(\01323.google.spanner.admin.ins" - + "tance.v1.InstancePartitionB\003\340A\002\0223\n\nfield" - + "_mask\030\002 \001(\0132\032.google.protobuf.FieldMaskB" - + "\003\340A\002\"\201\002\n\037UpdateInstancePartitionMetadata" - + "\022O\n\022instance_partition\030\001 \001(\01323.google.sp" + + "ceB\003\340A\002\"\311\001\n\024ListInstancesRequest\022C\n\006pare" + + "nt\030\001 \001(\tB3\340A\002\372A-\n+cloudresourcemanager.g" + + "oogleapis.com/Project\022\021\n\tpage_size\030\002 \001(\005" + + "\022\022\n\npage_token\030\003 \001(\t\022\016\n\006filter\030\004 \001(\t\0225\n\021" + + "instance_deadline\030\005 \001(\0132\032.google.protobu" + + "f.Timestamp\"\204\001\n\025ListInstancesResponse\022=\n" + + "\tinstances\030\001 \003(\0132*.google.spanner.admin." + + "instance.v1.Instance\022\027\n\017next_page_token\030" + + "\002 \001(\t\022\023\n\013unreachable\030\003 \003(\t\"\217\001\n\025UpdateIns" + + "tanceRequest\022A\n\010instance\030\001 \001(\0132*.google." 
+ + "spanner.admin.instance.v1.InstanceB\003\340A\002\022" + + "3\n\nfield_mask\030\002 \001(\0132\032.google.protobuf.Fi" + + "eldMaskB\003\340A\002\"N\n\025DeleteInstanceRequest\0225\n" + + "\004name\030\001 \001(\tB\'\340A\002\372A!\n\037spanner.googleapis." + + "com/Instance\"\277\002\n\026CreateInstanceMetadata\022" + + "<\n\010instance\030\001 \001(\0132*.google.spanner.admin" + + ".instance.v1.Instance\022.\n\nstart_time\030\002 \001(" + + "\0132\032.google.protobuf.Timestamp\022/\n\013cancel_" + + "time\030\003 \001(\0132\032.google.protobuf.Timestamp\022," + + "\n\010end_time\030\004 \001(\0132\032.google.protobuf.Times" + + "tamp\022X\n\033expected_fulfillment_period\030\005 \001(" + + "\01623.google.spanner.admin.instance.v1.Ful" + + "fillmentPeriod\"\277\002\n\026UpdateInstanceMetadat" + + "a\022<\n\010instance\030\001 \001(\0132*.google.spanner.adm" + + "in.instance.v1.Instance\022.\n\nstart_time\030\002 " + + "\001(\0132\032.google.protobuf.Timestamp\022/\n\013cance" + + "l_time\030\003 \001(\0132\032.google.protobuf.Timestamp" + + "\022,\n\010end_time\030\004 \001(\0132\032.google.protobuf.Tim" + + "estamp\022X\n\033expected_fulfillment_period\030\005 " + + "\001(\01623.google.spanner.admin.instance.v1.F" + + "ulfillmentPeriod\"\341\001\n\034CreateInstanceConfi" + + "gMetadata\022I\n\017instance_config\030\001 \001(\01320.goo" + + "gle.spanner.admin.instance.v1.InstanceCo" + + "nfig\022E\n\010progress\030\002 \001(\01323.google.spanner." + + "admin.instance.v1.OperationProgress\022/\n\013c" + + "ancel_time\030\003 \001(\0132\032.google.protobuf.Times" + + "tamp\"\341\001\n\034UpdateInstanceConfigMetadata\022I\n" + + "\017instance_config\030\001 \001(\01320.google.spanner." 
+ + "admin.instance.v1.InstanceConfig\022E\n\010prog" + + "ress\030\002 \001(\01323.google.spanner.admin.instan" + + "ce.v1.OperationProgress\022/\n\013cancel_time\030\003" + + " \001(\0132\032.google.protobuf.Timestamp\"\216\005\n\021Ins" + + "tancePartition\022\021\n\004name\030\001 \001(\tB\003\340A\002\022=\n\006con" + + "fig\030\002 \001(\tB-\340A\002\372A\'\n%spanner.googleapis.co" + + "m/InstanceConfig\022\031\n\014display_name\030\003 \001(\tB\003" + + "\340A\002\022\024\n\nnode_count\030\005 \001(\005H\000\022\032\n\020processing_" + + "units\030\006 \001(\005H\000\022M\n\005state\030\007 \001(\01629.google.sp" + "anner.admin.instance.v1.InstancePartitio" - + "n\022.\n\nstart_time\030\002 \001(\0132\032.google.protobuf." - + "Timestamp\022/\n\013cancel_time\030\003 \001(\0132\032.google." - + "protobuf.Timestamp\022,\n\010end_time\030\004 \001(\0132\032.g" - + "oogle.protobuf.Timestamp\"\305\001\n\035ListInstanc" - + "ePartitionsRequest\0227\n\006parent\030\001 \001(\tB\'\340A\002\372" - + "A!\n\037spanner.googleapis.com/Instance\022\021\n\tp" - + "age_size\030\002 \001(\005\022\022\n\npage_token\030\003 \001(\t\022D\n\033in" - + "stance_partition_deadline\030\004 \001(\0132\032.google" - + ".protobuf.TimestampB\003\340A\001\"\240\001\n\036ListInstanc" - + "ePartitionsResponse\022P\n\023instance_partitio" - + "ns\030\001 \003(\01323.google.spanner.admin.instance" - + ".v1.InstancePartition\022\027\n\017next_page_token" - + "\030\002 \001(\t\022\023\n\013unreachable\030\003 \003(\t\"\355\001\n&ListInst" - + "ancePartitionOperationsRequest\0227\n\006parent" - + "\030\001 \001(\tB\'\340A\002\372A!\n\037spanner.googleapis.com/I" - + "nstance\022\023\n\006filter\030\002 \001(\tB\003\340A\001\022\026\n\tpage_siz" - + "e\030\003 \001(\005B\003\340A\001\022\027\n\npage_token\030\004 \001(\tB\003\340A\001\022D\n" - + "\033instance_partition_deadline\030\005 \001(\0132\032.goo" - + "gle.protobuf.TimestampB\003\340A\001\"\236\001\n\'ListInst" - + 
"ancePartitionOperationsResponse\0221\n\nopera" - + "tions\030\001 \003(\0132\035.google.longrunning.Operati" - + "on\022\027\n\017next_page_token\030\002 \001(\t\022\'\n\037unreachab" - + "le_instance_partitions\030\003 \003(\t2\316%\n\rInstanc" - + "eAdmin\022\314\001\n\023ListInstanceConfigs\022<.google." - + "spanner.admin.instance.v1.ListInstanceCo" - + "nfigsRequest\032=.google.spanner.admin.inst" - + "ance.v1.ListInstanceConfigsResponse\"8\332A\006" - + "parent\202\323\344\223\002)\022\'/v1/{parent=projects/*}/in" - + "stanceConfigs\022\271\001\n\021GetInstanceConfig\022:.go" - + "ogle.spanner.admin.instance.v1.GetInstan" - + "ceConfigRequest\0320.google.spanner.admin.i" - + "nstance.v1.InstanceConfig\"6\332A\004name\202\323\344\223\002)" - + "\022\'/v1/{name=projects/*/instanceConfigs/*" - + "}\022\310\002\n\024CreateInstanceConfig\022=.google.span" - + "ner.admin.instance.v1.CreateInstanceConf" - + "igRequest\032\035.google.longrunning.Operation" - + "\"\321\001\312Ap\n/google.spanner.admin.instance.v1" - + ".InstanceConfig\022=google.spanner.admin.in" - + "stance.v1.CreateInstanceConfigMetadata\332A" - + ")parent,instance_config,instance_config_" - + "id\202\323\344\223\002,\"\'/v1/{parent=projects/*}/instan" - + "ceConfigs:\001*\022\312\002\n\024UpdateInstanceConfig\022=." - + "google.spanner.admin.instance.v1.UpdateI" - + "nstanceConfigRequest\032\035.google.longrunnin" - + "g.Operation\"\323\001\312Ap\n/google.spanner.admin." 
- + "instance.v1.InstanceConfig\022=google.spann" - + "er.admin.instance.v1.UpdateInstanceConfi" - + "gMetadata\332A\033instance_config,update_mask\202" - + "\323\344\223\002<27/v1/{instance_config.name=project" - + "s/*/instanceConfigs/*}:\001*\022\245\001\n\024DeleteInst" - + "anceConfig\022=.google.spanner.admin.instan" - + "ce.v1.DeleteInstanceConfigRequest\032\026.goog" - + "le.protobuf.Empty\"6\332A\004name\202\323\344\223\002)*\'/v1/{n" - + "ame=projects/*/instanceConfigs/*}\022\360\001\n\034Li" - + "stInstanceConfigOperations\022E.google.span" - + "ner.admin.instance.v1.ListInstanceConfig" - + "OperationsRequest\032F.google.spanner.admin" - + ".instance.v1.ListInstanceConfigOperation" - + "sResponse\"A\332A\006parent\202\323\344\223\0022\0220/v1/{parent=" - + "projects/*}/instanceConfigOperations\022\264\001\n" - + "\rListInstances\0226.google.spanner.admin.in" - + "stance.v1.ListInstancesRequest\0327.google." - + "spanner.admin.instance.v1.ListInstancesR" - + "esponse\"2\332A\006parent\202\323\344\223\002#\022!/v1/{parent=pr" - + "ojects/*}/instances\022\344\001\n\026ListInstancePart" - + "itions\022?.google.spanner.admin.instance.v" - + "1.ListInstancePartitionsRequest\032@.google" - + ".spanner.admin.instance.v1.ListInstanceP" - + "artitionsResponse\"G\332A\006parent\202\323\344\223\0028\0226/v1/" - + "{parent=projects/*/instances/*}/instance" - + "Partitions\022\241\001\n\013GetInstance\0224.google.span" - + "ner.admin.instance.v1.GetInstanceRequest" - + "\032*.google.spanner.admin.instance.v1.Inst" - + "ance\"0\332A\004name\202\323\344\223\002#\022!/v1/{name=projects/" - + "*/instances/*}\022\234\002\n\016CreateInstance\0227.goog" - + "le.spanner.admin.instance.v1.CreateInsta" - + "nceRequest\032\035.google.longrunning.Operatio" - + "n\"\261\001\312Ad\n)google.spanner.admin.instance.v" - + "1.Instance\0227google.spanner.admin.instanc" - + "e.v1.CreateInstanceMetadata\332A\033parent,ins" - + "tance_id,instance\202\323\344\223\002&\"!/v1/{parent=pro" - + 
"jects/*}/instances:\001*\022\235\002\n\016UpdateInstance" - + "\0227.google.spanner.admin.instance.v1.Upda" - + "teInstanceRequest\032\035.google.longrunning.O" - + "peration\"\262\001\312Ad\n)google.spanner.admin.ins" - + "tance.v1.Instance\0227google.spanner.admin." - + "instance.v1.UpdateInstanceMetadata\332A\023ins" - + "tance,field_mask\202\323\344\223\002/2*/v1/{instance.na" - + "me=projects/*/instances/*}:\001*\022\223\001\n\016Delete" - + "Instance\0227.google.spanner.admin.instance" - + ".v1.DeleteInstanceRequest\032\026.google.proto" - + "buf.Empty\"0\332A\004name\202\323\344\223\002#*!/v1/{name=proj" - + "ects/*/instances/*}\022\232\001\n\014SetIamPolicy\022\".g" - + "oogle.iam.v1.SetIamPolicyRequest\032\025.googl" - + "e.iam.v1.Policy\"O\332A\017resource,policy\202\323\344\223\002" - + "7\"2/v1/{resource=projects/*/instances/*}" - + ":setIamPolicy:\001*\022\223\001\n\014GetIamPolicy\022\".goog" - + "le.iam.v1.GetIamPolicyRequest\032\025.google.i" - + "am.v1.Policy\"H\332A\010resource\202\323\344\223\0027\"2/v1/{re" - + "source=projects/*/instances/*}:getIamPol" - + "icy:\001*\022\305\001\n\022TestIamPermissions\022(.google.i" - + "am.v1.TestIamPermissionsRequest\032).google" - + ".iam.v1.TestIamPermissionsResponse\"Z\332A\024r" - + "esource,permissions\202\323\344\223\002=\"8/v1/{resource" - + "=projects/*/instances/*}:testIamPermissi" - + "ons:\001*\022\321\001\n\024GetInstancePartition\022=.google" - + ".spanner.admin.instance.v1.GetInstancePa" - + "rtitionRequest\0323.google.spanner.admin.in" - + "stance.v1.InstancePartition\"E\332A\004name\202\323\344\223" - + "\0028\0226/v1/{name=projects/*/instances/*/ins" - + "tancePartitions/*}\022\351\002\n\027CreateInstancePar" - + "tition\022@.google.spanner.admin.instance.v" - + "1.CreateInstancePartitionRequest\032\035.googl" - + "e.longrunning.Operation\"\354\001\312Av\n2google.sp" + + "n.StateB\003\340A\003\0224\n\013create_time\030\010 \001(\0132\032.goog" + + 
"le.protobuf.TimestampB\003\340A\003\0224\n\013update_tim" + + "e\030\t \001(\0132\032.google.protobuf.TimestampB\003\340A\003" + + "\022\"\n\025referencing_databases\030\n \003(\tB\003\340A\003\022 \n\023" + + "referencing_backups\030\013 \003(\tB\003\340A\003\022\014\n\004etag\030\014" + + " \001(\t\"7\n\005State\022\025\n\021STATE_UNSPECIFIED\020\000\022\014\n\010" + + "CREATING\020\001\022\t\n\005READY\020\002:~\352A{\n(spanner.goog" + + "leapis.com/InstancePartition\022Oprojects/{" + + "project}/instances/{instance}/instancePa" + + "rtitions/{instance_partition}B\022\n\020compute" + + "_capacity\"\201\002\n\037CreateInstancePartitionMet" + + "adata\022O\n\022instance_partition\030\001 \001(\01323.goog" + + "le.spanner.admin.instance.v1.InstancePar" + + "tition\022.\n\nstart_time\030\002 \001(\0132\032.google.prot" + + "obuf.Timestamp\022/\n\013cancel_time\030\003 \001(\0132\032.go" + + "ogle.protobuf.Timestamp\022,\n\010end_time\030\004 \001(" + + "\0132\032.google.protobuf.Timestamp\"\323\001\n\036Create" + + "InstancePartitionRequest\0227\n\006parent\030\001 \001(\t" + + "B\'\340A\002\372A!\n\037spanner.googleapis.com/Instanc" + + "e\022\"\n\025instance_partition_id\030\002 \001(\tB\003\340A\002\022T\n" + + "\022instance_partition\030\003 \001(\01323.google.spann" + + "er.admin.instance.v1.InstancePartitionB\003" + + "\340A\002\"n\n\036DeleteInstancePartitionRequest\022>\n" + + "\004name\030\001 \001(\tB0\340A\002\372A*\n(spanner.googleapis." 
+ + "com/InstancePartition\022\014\n\004etag\030\002 \001(\t\"]\n\033G" + + "etInstancePartitionRequest\022>\n\004name\030\001 \001(\t" + + "B0\340A\002\372A*\n(spanner.googleapis.com/Instanc" + + "ePartition\"\253\001\n\036UpdateInstancePartitionRe" + + "quest\022T\n\022instance_partition\030\001 \001(\01323.goog" + + "le.spanner.admin.instance.v1.InstancePar" + + "titionB\003\340A\002\0223\n\nfield_mask\030\002 \001(\0132\032.google" + + ".protobuf.FieldMaskB\003\340A\002\"\201\002\n\037UpdateInsta" + + "ncePartitionMetadata\022O\n\022instance_partiti" + + "on\030\001 \001(\01323.google.spanner.admin.instance" + + ".v1.InstancePartition\022.\n\nstart_time\030\002 \001(" + + "\0132\032.google.protobuf.Timestamp\022/\n\013cancel_" + + "time\030\003 \001(\0132\032.google.protobuf.Timestamp\022," + + "\n\010end_time\030\004 \001(\0132\032.google.protobuf.Times" + + "tamp\"\305\001\n\035ListInstancePartitionsRequest\0227" + + "\n\006parent\030\001 \001(\tB\'\340A\002\372A!\n\037spanner.googleap" + + "is.com/Instance\022\021\n\tpage_size\030\002 \001(\005\022\022\n\npa" + + "ge_token\030\003 \001(\t\022D\n\033instance_partition_dea" + + "dline\030\004 \001(\0132\032.google.protobuf.TimestampB" + + "\003\340A\001\"\240\001\n\036ListInstancePartitionsResponse\022" + + "P\n\023instance_partitions\030\001 \003(\01323.google.sp" + "anner.admin.instance.v1.InstancePartitio" - + "n\022@google.spanner.admin.instance.v1.Crea" - + "teInstancePartitionMetadata\332A/parent,ins" - + "tance_partition,instance_partition_id\202\323\344" - + "\223\002;\"6/v1/{parent=projects/*/instances/*}" - + "/instancePartitions:\001*\022\272\001\n\027DeleteInstanc" - + "ePartition\022@.google.spanner.admin.instan" - + "ce.v1.DeleteInstancePartitionRequest\032\026.g" - + "oogle.protobuf.Empty\"E\332A\004name\202\323\344\223\0028*6/v1" - + "/{name=projects/*/instances/*/instancePa" - + "rtitions/*}\022\352\002\n\027UpdateInstancePartition\022" - + "@.google.spanner.admin.instance.v1.Updat" - + 
"eInstancePartitionRequest\032\035.google.longr" - + "unning.Operation\"\355\001\312Av\n2google.spanner.a" - + "dmin.instance.v1.InstancePartition\022@goog" - + "le.spanner.admin.instance.v1.UpdateInsta" - + "ncePartitionMetadata\332A\035instance_partitio" - + "n,field_mask\202\323\344\223\002N2I/v1/{instance_partit" - + "ion.name=projects/*/instances/*/instance" - + "Partitions/*}:\001*\022\210\002\n\037ListInstancePartiti" - + "onOperations\022H.google.spanner.admin.inst" - + "ance.v1.ListInstancePartitionOperationsR" - + "equest\032I.google.spanner.admin.instance.v" - + "1.ListInstancePartitionOperationsRespons" - + "e\"P\332A\006parent\202\323\344\223\002A\022?/v1/{parent=projects" - + "/*/instances/*}/instancePartitionOperati" - + "ons\032x\312A\026spanner.googleapis.com\322A\\https:/" - + "/www.googleapis.com/auth/cloud-platform," - + "https://www.googleapis.com/auth/spanner." - + "adminB\213\002\n$com.google.spanner.admin.insta" - + "nce.v1B\031SpannerInstanceAdminProtoP\001ZFclo" - + "ud.google.com/go/spanner/admin/instance/" - + "apiv1/instancepb;instancepb\252\002&Google.Clo" - + "ud.Spanner.Admin.Instance.V1\312\002&Google\\Cl" - + "oud\\Spanner\\Admin\\Instance\\V1\352\002+Google::" - + "Cloud::Spanner::Admin::Instance::V1b\006pro" - + "to3" + + "n\022\027\n\017next_page_token\030\002 \001(\t\022\023\n\013unreachabl" + + "e\030\003 \003(\t\"\355\001\n&ListInstancePartitionOperati" + + "onsRequest\0227\n\006parent\030\001 \001(\tB\'\340A\002\372A!\n\037span" + + "ner.googleapis.com/Instance\022\023\n\006filter\030\002 " + + "\001(\tB\003\340A\001\022\026\n\tpage_size\030\003 \001(\005B\003\340A\001\022\027\n\npage" + + "_token\030\004 \001(\tB\003\340A\001\022D\n\033instance_partition_" + + "deadline\030\005 \001(\0132\032.google.protobuf.Timesta" + + "mpB\003\340A\001\"\236\001\n\'ListInstancePartitionOperati" + + "onsResponse\0221\n\noperations\030\001 \003(\0132\035.google" + + ".longrunning.Operation\022\027\n\017next_page_toke" + + "n\030\002 
\001(\t\022\'\n\037unreachable_instance_partitio" + + "ns\030\003 \003(\t\"\222\001\n\023MoveInstanceRequest\0225\n\004name" + + "\030\001 \001(\tB\'\340A\002\372A!\n\037spanner.googleapis.com/I" + + "nstance\022D\n\rtarget_config\030\002 \001(\tB-\340A\002\372A\'\n%" + + "spanner.googleapis.com/InstanceConfig\"\026\n" + + "\024MoveInstanceResponse\"\245\001\n\024MoveInstanceMe" + + "tadata\022\025\n\rtarget_config\030\001 \001(\t\022E\n\010progres" + + "s\030\002 \001(\01323.google.spanner.admin.instance." + + "v1.OperationProgress\022/\n\013cancel_time\030\003 \001(" + + "\0132\032.google.protobuf.Timestamp2\332\'\n\rInstan" + + "ceAdmin\022\314\001\n\023ListInstanceConfigs\022<.google" + + ".spanner.admin.instance.v1.ListInstanceC" + + "onfigsRequest\032=.google.spanner.admin.ins" + + "tance.v1.ListInstanceConfigsResponse\"8\332A" + + "\006parent\202\323\344\223\002)\022\'/v1/{parent=projects/*}/i" + + "nstanceConfigs\022\271\001\n\021GetInstanceConfig\022:.g" + + "oogle.spanner.admin.instance.v1.GetInsta" + + "nceConfigRequest\0320.google.spanner.admin." 
+ + "instance.v1.InstanceConfig\"6\332A\004name\202\323\344\223\002" + + ")\022\'/v1/{name=projects/*/instanceConfigs/" + + "*}\022\310\002\n\024CreateInstanceConfig\022=.google.spa" + + "nner.admin.instance.v1.CreateInstanceCon" + + "figRequest\032\035.google.longrunning.Operatio" + + "n\"\321\001\312Ap\n/google.spanner.admin.instance.v" + + "1.InstanceConfig\022=google.spanner.admin.i" + + "nstance.v1.CreateInstanceConfigMetadata\332" + + "A)parent,instance_config,instance_config" + + "_id\202\323\344\223\002,\"\'/v1/{parent=projects/*}/insta" + + "nceConfigs:\001*\022\312\002\n\024UpdateInstanceConfig\022=" + + ".google.spanner.admin.instance.v1.Update" + + "InstanceConfigRequest\032\035.google.longrunni" + + "ng.Operation\"\323\001\312Ap\n/google.spanner.admin" + + ".instance.v1.InstanceConfig\022=google.span" + + "ner.admin.instance.v1.UpdateInstanceConf" + + "igMetadata\332A\033instance_config,update_mask" + + "\202\323\344\223\002<27/v1/{instance_config.name=projec" + + "ts/*/instanceConfigs/*}:\001*\022\245\001\n\024DeleteIns" + + "tanceConfig\022=.google.spanner.admin.insta" + + "nce.v1.DeleteInstanceConfigRequest\032\026.goo" + + "gle.protobuf.Empty\"6\332A\004name\202\323\344\223\002)*\'/v1/{" + + "name=projects/*/instanceConfigs/*}\022\360\001\n\034L" + + "istInstanceConfigOperations\022E.google.spa" + + "nner.admin.instance.v1.ListInstanceConfi" + + "gOperationsRequest\032F.google.spanner.admi" + + "n.instance.v1.ListInstanceConfigOperatio" + + "nsResponse\"A\332A\006parent\202\323\344\223\0022\0220/v1/{parent" + + "=projects/*}/instanceConfigOperations\022\264\001" + + "\n\rListInstances\0226.google.spanner.admin.i" + + "nstance.v1.ListInstancesRequest\0327.google" + + ".spanner.admin.instance.v1.ListInstances" + + "Response\"2\332A\006parent\202\323\344\223\002#\022!/v1/{parent=p" + + "rojects/*}/instances\022\344\001\n\026ListInstancePar" + + "titions\022?.google.spanner.admin.instance." 
+ + "v1.ListInstancePartitionsRequest\032@.googl" + + "e.spanner.admin.instance.v1.ListInstance" + + "PartitionsResponse\"G\332A\006parent\202\323\344\223\0028\0226/v1" + + "/{parent=projects/*/instances/*}/instanc" + + "ePartitions\022\241\001\n\013GetInstance\0224.google.spa" + + "nner.admin.instance.v1.GetInstanceReques" + + "t\032*.google.spanner.admin.instance.v1.Ins" + + "tance\"0\332A\004name\202\323\344\223\002#\022!/v1/{name=projects" + + "/*/instances/*}\022\234\002\n\016CreateInstance\0227.goo" + + "gle.spanner.admin.instance.v1.CreateInst" + + "anceRequest\032\035.google.longrunning.Operati" + + "on\"\261\001\312Ad\n)google.spanner.admin.instance." + + "v1.Instance\0227google.spanner.admin.instan" + + "ce.v1.CreateInstanceMetadata\332A\033parent,in" + + "stance_id,instance\202\323\344\223\002&\"!/v1/{parent=pr" + + "ojects/*}/instances:\001*\022\235\002\n\016UpdateInstanc" + + "e\0227.google.spanner.admin.instance.v1.Upd" + + "ateInstanceRequest\032\035.google.longrunning." + + "Operation\"\262\001\312Ad\n)google.spanner.admin.in" + + "stance.v1.Instance\0227google.spanner.admin" + + ".instance.v1.UpdateInstanceMetadata\332A\023in" + + "stance,field_mask\202\323\344\223\002/2*/v1/{instance.n" + + "ame=projects/*/instances/*}:\001*\022\223\001\n\016Delet" + + "eInstance\0227.google.spanner.admin.instanc" + + "e.v1.DeleteInstanceRequest\032\026.google.prot" + + "obuf.Empty\"0\332A\004name\202\323\344\223\002#*!/v1/{name=pro" + + "jects/*/instances/*}\022\232\001\n\014SetIamPolicy\022\"." + + "google.iam.v1.SetIamPolicyRequest\032\025.goog" + + "le.iam.v1.Policy\"O\332A\017resource,policy\202\323\344\223" + + "\0027\"2/v1/{resource=projects/*/instances/*" + + "}:setIamPolicy:\001*\022\223\001\n\014GetIamPolicy\022\".goo" + + "gle.iam.v1.GetIamPolicyRequest\032\025.google." + + "iam.v1.Policy\"H\332A\010resource\202\323\344\223\0027\"2/v1/{r" + + "esource=projects/*/instances/*}:getIamPo" + + "licy:\001*\022\305\001\n\022TestIamPermissions\022(.google." 
+ + "iam.v1.TestIamPermissionsRequest\032).googl" + + "e.iam.v1.TestIamPermissionsResponse\"Z\332A\024" + + "resource,permissions\202\323\344\223\002=\"8/v1/{resourc" + + "e=projects/*/instances/*}:testIamPermiss" + + "ions:\001*\022\321\001\n\024GetInstancePartition\022=.googl" + + "e.spanner.admin.instance.v1.GetInstanceP" + + "artitionRequest\0323.google.spanner.admin.i" + + "nstance.v1.InstancePartition\"E\332A\004name\202\323\344" + + "\223\0028\0226/v1/{name=projects/*/instances/*/in" + + "stancePartitions/*}\022\351\002\n\027CreateInstancePa" + + "rtition\022@.google.spanner.admin.instance." + + "v1.CreateInstancePartitionRequest\032\035.goog" + + "le.longrunning.Operation\"\354\001\312Av\n2google.s" + + "panner.admin.instance.v1.InstancePartiti" + + "on\022@google.spanner.admin.instance.v1.Cre" + + "ateInstancePartitionMetadata\332A/parent,in" + + "stance_partition,instance_partition_id\202\323" + + "\344\223\002;\"6/v1/{parent=projects/*/instances/*" + + "}/instancePartitions:\001*\022\272\001\n\027DeleteInstan" + + "cePartition\022@.google.spanner.admin.insta" + + "nce.v1.DeleteInstancePartitionRequest\032\026." + + "google.protobuf.Empty\"E\332A\004name\202\323\344\223\0028*6/v" + + "1/{name=projects/*/instances/*/instanceP" + + "artitions/*}\022\352\002\n\027UpdateInstancePartition" + + "\022@.google.spanner.admin.instance.v1.Upda" + + "teInstancePartitionRequest\032\035.google.long" + + "running.Operation\"\355\001\312Av\n2google.spanner." + + "admin.instance.v1.InstancePartition\022@goo" + + "gle.spanner.admin.instance.v1.UpdateInst" + + "ancePartitionMetadata\332A\035instance_partiti" + + "on,field_mask\202\323\344\223\002N2I/v1/{instance_parti" + + "tion.name=projects/*/instances/*/instanc" + + "ePartitions/*}:\001*\022\210\002\n\037ListInstancePartit" + + "ionOperations\022H.google.spanner.admin.ins" + + "tance.v1.ListInstancePartitionOperations" + + "Request\032I.google.spanner.admin.instance." 
+ + "v1.ListInstancePartitionOperationsRespon" + + "se\"P\332A\006parent\202\323\344\223\002A\022?/v1/{parent=project" + + "s/*/instances/*}/instancePartitionOperat" + + "ions\022\211\002\n\014MoveInstance\0225.google.spanner.a" + + "dmin.instance.v1.MoveInstanceRequest\032\035.g" + + "oogle.longrunning.Operation\"\242\001\312An\n5googl" + + "e.spanner.admin.instance.v1.MoveInstance" + + "Response\0225google.spanner.admin.instance." + + "v1.MoveInstanceMetadata\202\323\344\223\002+\"&/v1/{name" + + "=projects/*/instances/*}:move:\001*\032x\312A\026spa" + + "nner.googleapis.com\322A\\https://www.google" + + "apis.com/auth/cloud-platform,https://www" + + ".googleapis.com/auth/spanner.adminB\213\002\n$c" + + "om.google.spanner.admin.instance.v1B\031Spa" + + "nnerInstanceAdminProtoP\001ZFcloud.google.c" + + "om/go/spanner/admin/instance/apiv1/insta" + + "ncepb;instancepb\252\002&Google.Cloud.Spanner." + + "Admin.Instance.V1\312\002&Google\\Cloud\\Spanner" + + "\\Admin\\Instance\\V1\352\002+Google::Cloud::Span" + + "ner::Admin::Instance::V1b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -577,13 +641,21 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "Key", "Value", }); - internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_descriptor = + internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_descriptor = getDescriptor().getMessageTypes().get(2); + internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_ReplicaComputeCapacity_descriptor, + new java.lang.String[] { + "ReplicaSelection", "NodeCount", "ProcessingUnits", "ComputeCapacity", + }); + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_descriptor = + getDescriptor().getMessageTypes().get(3); 
internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_descriptor, new java.lang.String[] { - "AutoscalingLimits", "AutoscalingTargets", + "AutoscalingLimits", "AutoscalingTargets", "AsymmetricAutoscalingOptions", }); internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AutoscalingLimits_descriptor = internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_descriptor @@ -610,8 +682,28 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "HighPriorityCpuUtilizationPercent", "StorageUtilizationPercent", }); + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_descriptor = + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_descriptor + .getNestedTypes() + .get(2); + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_descriptor, + new java.lang.String[] { + "ReplicaSelection", "Overrides", + }); + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_descriptor = + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_descriptor + .getNestedTypes() + .get(0); + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_AutoscalingConfig_AsymmetricAutoscalingOption_AutoscalingConfigOverrides_descriptor, + new java.lang.String[] { + 
"AutoscalingLimits", "AutoscalingTargetHighPriorityCpuUtilizationPercent", + }); internal_static_google_spanner_admin_instance_v1_Instance_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(4); internal_static_google_spanner_admin_instance_v1_Instance_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_Instance_descriptor, @@ -621,12 +713,14 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "DisplayName", "NodeCount", "ProcessingUnits", + "ReplicaComputeCapacity", "AutoscalingConfig", "State", "Labels", "EndpointUris", "CreateTime", "UpdateTime", + "Edition", }); internal_static_google_spanner_admin_instance_v1_Instance_LabelsEntry_descriptor = internal_static_google_spanner_admin_instance_v1_Instance_descriptor @@ -639,7 +733,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Key", "Value", }); internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsRequest_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(5); internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsRequest_descriptor, @@ -647,7 +741,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Parent", "PageSize", "PageToken", }); internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsResponse_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(6); internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_ListInstanceConfigsResponse_descriptor, @@ 
-655,7 +749,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "InstanceConfigs", "NextPageToken", }); internal_static_google_spanner_admin_instance_v1_GetInstanceConfigRequest_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(7); internal_static_google_spanner_admin_instance_v1_GetInstanceConfigRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_GetInstanceConfigRequest_descriptor, @@ -663,7 +757,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Name", }); internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigRequest_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(8); internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigRequest_descriptor, @@ -671,7 +765,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Parent", "InstanceConfigId", "InstanceConfig", "ValidateOnly", }); internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigRequest_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(9); internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigRequest_descriptor, @@ -679,7 +773,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "InstanceConfig", "UpdateMask", "ValidateOnly", }); internal_static_google_spanner_admin_instance_v1_DeleteInstanceConfigRequest_descriptor = - getDescriptor().getMessageTypes().get(9); + 
getDescriptor().getMessageTypes().get(10); internal_static_google_spanner_admin_instance_v1_DeleteInstanceConfigRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_DeleteInstanceConfigRequest_descriptor, @@ -687,7 +781,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Name", "Etag", "ValidateOnly", }); internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsRequest_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(11); internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsRequest_descriptor, @@ -695,7 +789,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Parent", "Filter", "PageSize", "PageToken", }); internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsResponse_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(12); internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_ListInstanceConfigOperationsResponse_descriptor, @@ -703,7 +797,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Operations", "NextPageToken", }); internal_static_google_spanner_admin_instance_v1_GetInstanceRequest_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(13); internal_static_google_spanner_admin_instance_v1_GetInstanceRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( 
internal_static_google_spanner_admin_instance_v1_GetInstanceRequest_descriptor, @@ -711,7 +805,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Name", "FieldMask", }); internal_static_google_spanner_admin_instance_v1_CreateInstanceRequest_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(14); internal_static_google_spanner_admin_instance_v1_CreateInstanceRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_CreateInstanceRequest_descriptor, @@ -719,7 +813,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Parent", "InstanceId", "Instance", }); internal_static_google_spanner_admin_instance_v1_ListInstancesRequest_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(15); internal_static_google_spanner_admin_instance_v1_ListInstancesRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_ListInstancesRequest_descriptor, @@ -727,7 +821,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Parent", "PageSize", "PageToken", "Filter", "InstanceDeadline", }); internal_static_google_spanner_admin_instance_v1_ListInstancesResponse_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(16); internal_static_google_spanner_admin_instance_v1_ListInstancesResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_ListInstancesResponse_descriptor, @@ -735,7 +829,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Instances", "NextPageToken", "Unreachable", }); internal_static_google_spanner_admin_instance_v1_UpdateInstanceRequest_descriptor = - 
getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(17); internal_static_google_spanner_admin_instance_v1_UpdateInstanceRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_UpdateInstanceRequest_descriptor, @@ -743,7 +837,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Instance", "FieldMask", }); internal_static_google_spanner_admin_instance_v1_DeleteInstanceRequest_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(18); internal_static_google_spanner_admin_instance_v1_DeleteInstanceRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_DeleteInstanceRequest_descriptor, @@ -751,7 +845,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Name", }); internal_static_google_spanner_admin_instance_v1_CreateInstanceMetadata_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(19); internal_static_google_spanner_admin_instance_v1_CreateInstanceMetadata_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_CreateInstanceMetadata_descriptor, @@ -759,7 +853,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Instance", "StartTime", "CancelTime", "EndTime", "ExpectedFulfillmentPeriod", }); internal_static_google_spanner_admin_instance_v1_UpdateInstanceMetadata_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(20); internal_static_google_spanner_admin_instance_v1_UpdateInstanceMetadata_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_UpdateInstanceMetadata_descriptor, @@ -767,7 
+861,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Instance", "StartTime", "CancelTime", "EndTime", "ExpectedFulfillmentPeriod", }); internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigMetadata_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(21); internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigMetadata_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_CreateInstanceConfigMetadata_descriptor, @@ -775,7 +869,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "InstanceConfig", "Progress", "CancelTime", }); internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigMetadata_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(22); internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigMetadata_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_UpdateInstanceConfigMetadata_descriptor, @@ -783,7 +877,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "InstanceConfig", "Progress", "CancelTime", }); internal_static_google_spanner_admin_instance_v1_InstancePartition_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(23); internal_static_google_spanner_admin_instance_v1_InstancePartition_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_InstancePartition_descriptor, @@ -802,7 +896,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ComputeCapacity", }); internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionMetadata_descriptor = - getDescriptor().getMessageTypes().get(23); + 
getDescriptor().getMessageTypes().get(24); internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionMetadata_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionMetadata_descriptor, @@ -810,7 +904,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "InstancePartition", "StartTime", "CancelTime", "EndTime", }); internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionRequest_descriptor = - getDescriptor().getMessageTypes().get(24); + getDescriptor().getMessageTypes().get(25); internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_CreateInstancePartitionRequest_descriptor, @@ -818,7 +912,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Parent", "InstancePartitionId", "InstancePartition", }); internal_static_google_spanner_admin_instance_v1_DeleteInstancePartitionRequest_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(26); internal_static_google_spanner_admin_instance_v1_DeleteInstancePartitionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_DeleteInstancePartitionRequest_descriptor, @@ -826,7 +920,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Name", "Etag", }); internal_static_google_spanner_admin_instance_v1_GetInstancePartitionRequest_descriptor = - getDescriptor().getMessageTypes().get(26); + getDescriptor().getMessageTypes().get(27); internal_static_google_spanner_admin_instance_v1_GetInstancePartitionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( 
internal_static_google_spanner_admin_instance_v1_GetInstancePartitionRequest_descriptor, @@ -834,7 +928,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Name", }); internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionRequest_descriptor = - getDescriptor().getMessageTypes().get(27); + getDescriptor().getMessageTypes().get(28); internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionRequest_descriptor, @@ -842,7 +936,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "InstancePartition", "FieldMask", }); internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionMetadata_descriptor = - getDescriptor().getMessageTypes().get(28); + getDescriptor().getMessageTypes().get(29); internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionMetadata_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_UpdateInstancePartitionMetadata_descriptor, @@ -850,7 +944,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "InstancePartition", "StartTime", "CancelTime", "EndTime", }); internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsRequest_descriptor = - getDescriptor().getMessageTypes().get(29); + getDescriptor().getMessageTypes().get(30); internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsRequest_descriptor, @@ -858,7 +952,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Parent", "PageSize", "PageToken", "InstancePartitionDeadline", }); 
internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsResponse_descriptor = - getDescriptor().getMessageTypes().get(30); + getDescriptor().getMessageTypes().get(31); internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_ListInstancePartitionsResponse_descriptor, @@ -866,7 +960,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "InstancePartitions", "NextPageToken", "Unreachable", }); internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsRequest_descriptor = - getDescriptor().getMessageTypes().get(31); + getDescriptor().getMessageTypes().get(32); internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsRequest_descriptor, @@ -874,13 +968,35 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "Parent", "Filter", "PageSize", "PageToken", "InstancePartitionDeadline", }); internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsResponse_descriptor = - getDescriptor().getMessageTypes().get(32); + getDescriptor().getMessageTypes().get(33); internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_admin_instance_v1_ListInstancePartitionOperationsResponse_descriptor, new java.lang.String[] { "Operations", "NextPageToken", "UnreachableInstancePartitions", }); + internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_descriptor = + getDescriptor().getMessageTypes().get(34); + 
internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_MoveInstanceRequest_descriptor, + new java.lang.String[] { + "Name", "TargetConfig", + }); + internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_descriptor = + getDescriptor().getMessageTypes().get(35); + internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_MoveInstanceResponse_descriptor, + new java.lang.String[] {}); + internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_descriptor = + getDescriptor().getMessageTypes().get(36); + internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_instance_v1_MoveInstanceMetadata_descriptor, + new java.lang.String[] { + "TargetConfig", "Progress", "CancelTime", + }); com.google.protobuf.ExtensionRegistry registry = com.google.protobuf.ExtensionRegistry.newInstance(); registry.add(com.google.api.ClientProto.defaultHost); diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigMetadata.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigMetadata.java index d1a3806d034..b2f0e3dc762 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigMetadata.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigMetadata.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** @@ -69,7 +69,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
    -   * The desired instance config after updating.
    +   * The desired instance configuration after updating.
        * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -84,7 +84,7 @@ public boolean hasInstanceConfig() { * * *
    -   * The desired instance config after updating.
    +   * The desired instance configuration after updating.
        * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -101,7 +101,7 @@ public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() { * * *
    -   * The desired instance config after updating.
    +   * The desired instance configuration after updating.
        * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -661,7 +661,7 @@ public Builder mergeFrom( * * *
    -     * The desired instance config after updating.
    +     * The desired instance configuration after updating.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -675,7 +675,7 @@ public boolean hasInstanceConfig() { * * *
    -     * The desired instance config after updating.
    +     * The desired instance configuration after updating.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -695,7 +695,7 @@ public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() { * * *
    -     * The desired instance config after updating.
    +     * The desired instance configuration after updating.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -717,7 +717,7 @@ public Builder setInstanceConfig(com.google.spanner.admin.instance.v1.InstanceCo * * *
    -     * The desired instance config after updating.
    +     * The desired instance configuration after updating.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -737,7 +737,7 @@ public Builder setInstanceConfig( * * *
    -     * The desired instance config after updating.
    +     * The desired instance configuration after updating.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -765,7 +765,7 @@ public Builder mergeInstanceConfig(com.google.spanner.admin.instance.v1.Instance * * *
    -     * The desired instance config after updating.
    +     * The desired instance configuration after updating.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -784,7 +784,7 @@ public Builder clearInstanceConfig() { * * *
    -     * The desired instance config after updating.
    +     * The desired instance configuration after updating.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -798,7 +798,7 @@ public com.google.spanner.admin.instance.v1.InstanceConfig.Builder getInstanceCo * * *
    -     * The desired instance config after updating.
    +     * The desired instance configuration after updating.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -817,7 +817,7 @@ public com.google.spanner.admin.instance.v1.InstanceConfig.Builder getInstanceCo * * *
    -     * The desired instance config after updating.
    +     * The desired instance configuration after updating.
          * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigMetadataOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigMetadataOrBuilder.java index 98253a2d296..912e8dafc56 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigMetadataOrBuilder.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigMetadataOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; public interface UpdateInstanceConfigMetadataOrBuilder @@ -28,7 +28,7 @@ public interface UpdateInstanceConfigMetadataOrBuilder * * *
    -   * The desired instance config after updating.
    +   * The desired instance configuration after updating.
        * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -40,7 +40,7 @@ public interface UpdateInstanceConfigMetadataOrBuilder * * *
    -   * The desired instance config after updating.
    +   * The desired instance configuration after updating.
        * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; @@ -52,7 +52,7 @@ public interface UpdateInstanceConfigMetadataOrBuilder * * *
    -   * The desired instance config after updating.
    +   * The desired instance configuration after updating.
        * 
    * * .google.spanner.admin.instance.v1.InstanceConfig instance_config = 1; diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigRequest.java index 0b0e3f62bb3..116b52ef5c3 100644 --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigRequest.java +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.admin.instance.v1; /** @@ -69,8 +69,9 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
    -   * Required. The user instance config to update, which must always include the
    -   * instance config name. Otherwise, only fields mentioned in
    +   * Required. The user instance configuration to update, which must always
    +   * include the instance configuration name. Otherwise, only fields mentioned
    +   * in
        * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
        * need be included. To prevent conflicts of concurrent updates,
        * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    @@ -91,8 +92,9 @@ public boolean hasInstanceConfig() {
        *
        *
        * 
    -   * Required. The user instance config to update, which must always include the
    -   * instance config name. Otherwise, only fields mentioned in
    +   * Required. The user instance configuration to update, which must always
    +   * include the instance configuration name. Otherwise, only fields mentioned
    +   * in
        * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
        * need be included. To prevent conflicts of concurrent updates,
        * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    @@ -115,8 +117,9 @@ public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() {
        *
        *
        * 
    -   * Required. The user instance config to update, which must always include the
    -   * instance config name. Otherwise, only fields mentioned in
    +   * Required. The user instance configuration to update, which must always
    +   * include the instance configuration name. Otherwise, only fields mentioned
    +   * in
        * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
        * need be included. To prevent conflicts of concurrent updates,
        * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    @@ -650,8 +653,9 @@ public Builder mergeFrom(
          *
          *
          * 
    -     * Required. The user instance config to update, which must always include the
    -     * instance config name. Otherwise, only fields mentioned in
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
          * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
          * need be included. To prevent conflicts of concurrent updates,
          * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    @@ -671,8 +675,9 @@ public boolean hasInstanceConfig() {
          *
          *
          * 
    -     * Required. The user instance config to update, which must always include the
    -     * instance config name. Otherwise, only fields mentioned in
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
          * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
          * need be included. To prevent conflicts of concurrent updates,
          * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    @@ -698,8 +703,9 @@ public com.google.spanner.admin.instance.v1.InstanceConfig getInstanceConfig() {
          *
          *
          * 
    -     * Required. The user instance config to update, which must always include the
    -     * instance config name. Otherwise, only fields mentioned in
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
          * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
          * need be included. To prevent conflicts of concurrent updates,
          * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    @@ -727,8 +733,9 @@ public Builder setInstanceConfig(com.google.spanner.admin.instance.v1.InstanceCo
          *
          *
          * 
    -     * Required. The user instance config to update, which must always include the
    -     * instance config name. Otherwise, only fields mentioned in
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
          * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
          * need be included. To prevent conflicts of concurrent updates,
          * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    @@ -754,8 +761,9 @@ public Builder setInstanceConfig(
          *
          *
          * 
    -     * Required. The user instance config to update, which must always include the
    -     * instance config name. Otherwise, only fields mentioned in
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
          * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
          * need be included. To prevent conflicts of concurrent updates,
          * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    @@ -789,8 +797,9 @@ public Builder mergeInstanceConfig(com.google.spanner.admin.instance.v1.Instance
          *
          *
          * 
    -     * Required. The user instance config to update, which must always include the
    -     * instance config name. Otherwise, only fields mentioned in
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
          * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
          * need be included. To prevent conflicts of concurrent updates,
          * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    @@ -815,8 +824,9 @@ public Builder clearInstanceConfig() {
          *
          *
          * 
    -     * Required. The user instance config to update, which must always include the
    -     * instance config name. Otherwise, only fields mentioned in
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
          * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
          * need be included. To prevent conflicts of concurrent updates,
          * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    @@ -836,8 +846,9 @@ public com.google.spanner.admin.instance.v1.InstanceConfig.Builder getInstanceCo
          *
          *
          * 
    -     * Required. The user instance config to update, which must always include the
    -     * instance config name. Otherwise, only fields mentioned in
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
          * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
          * need be included. To prevent conflicts of concurrent updates,
          * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    @@ -862,8 +873,9 @@ public com.google.spanner.admin.instance.v1.InstanceConfig.Builder getInstanceCo
          *
          *
          * 
    -     * Required. The user instance config to update, which must always include the
    -     * instance config name. Otherwise, only fields mentioned in
    +     * Required. The user instance configuration to update, which must always
    +     * include the instance configuration name. Otherwise, only fields mentioned
    +     * in
          * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
          * need be included. To prevent conflicts of concurrent updates,
          * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigRequestOrBuilder.java
    index 5449ae00d55..3e4e77c9fe7 100644
    --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigRequestOrBuilder.java
    +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceConfigRequestOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.admin.instance.v1;
     
     public interface UpdateInstanceConfigRequestOrBuilder
    @@ -28,8 +28,9 @@ public interface UpdateInstanceConfigRequestOrBuilder
        *
        *
        * 
    -   * Required. The user instance config to update, which must always include the
    -   * instance config name. Otherwise, only fields mentioned in
    +   * Required. The user instance configuration to update, which must always
    +   * include the instance configuration name. Otherwise, only fields mentioned
    +   * in
        * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
        * need be included. To prevent conflicts of concurrent updates,
        * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    @@ -47,8 +48,9 @@ public interface UpdateInstanceConfigRequestOrBuilder
        *
        *
        * 
    -   * Required. The user instance config to update, which must always include the
    -   * instance config name. Otherwise, only fields mentioned in
    +   * Required. The user instance configuration to update, which must always
    +   * include the instance configuration name. Otherwise, only fields mentioned
    +   * in
        * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
        * need be included. To prevent conflicts of concurrent updates,
        * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    @@ -66,8 +68,9 @@ public interface UpdateInstanceConfigRequestOrBuilder
        *
        *
        * 
    -   * Required. The user instance config to update, which must always include the
    -   * instance config name. Otherwise, only fields mentioned in
    +   * Required. The user instance configuration to update, which must always
    +   * include the instance configuration name. Otherwise, only fields mentioned
    +   * in
        * [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
        * need be included. To prevent conflicts of concurrent updates,
        * [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceMetadata.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceMetadata.java
    index fbc356e800f..48063d92821 100644
    --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceMetadata.java
    +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceMetadata.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.admin.instance.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceMetadataOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceMetadataOrBuilder.java
    index cd9fd6b9e87..df942c1e654 100644
    --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceMetadataOrBuilder.java
    +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceMetadataOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.admin.instance.v1;
     
     public interface UpdateInstanceMetadataOrBuilder
    diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionMetadata.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionMetadata.java
    index 8d8acbcb18c..8b74f644a4f 100644
    --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionMetadata.java
    +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionMetadata.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.admin.instance.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionMetadataOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionMetadataOrBuilder.java
    index e7e7afc201f..3a35d54b784 100644
    --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionMetadataOrBuilder.java
    +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionMetadataOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.admin.instance.v1;
     
     public interface UpdateInstancePartitionMetadataOrBuilder
    diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionRequest.java
    index 2cdd9aac0c3..ba5014f2c77 100644
    --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionRequest.java
    +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionRequest.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.admin.instance.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionRequestOrBuilder.java
    index 802e9ba4f7d..cb1458f5630 100644
    --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionRequestOrBuilder.java
    +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstancePartitionRequestOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.admin.instance.v1;
     
     public interface UpdateInstancePartitionRequestOrBuilder
    diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceRequest.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceRequest.java
    index cc73cfff535..ed35d34ac83 100644
    --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceRequest.java
    +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceRequest.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.admin.instance.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceRequestOrBuilder.java b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceRequestOrBuilder.java
    index 4547b94ae22..7fc5dbfe287 100644
    --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceRequestOrBuilder.java
    +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/java/com/google/spanner/admin/instance/v1/UpdateInstanceRequestOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/admin/instance/v1/spanner_instance_admin.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.admin.instance.v1;
     
     public interface UpdateInstanceRequestOrBuilder
    diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/proto/google/spanner/admin/instance/v1/common.proto b/proto-google-cloud-spanner-admin-instance-v1/src/main/proto/google/spanner/admin/instance/v1/common.proto
    index ab6293acffb..69717ec228a 100644
    --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/proto/google/spanner/admin/instance/v1/common.proto
    +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/proto/google/spanner/admin/instance/v1/common.proto
    @@ -1,4 +1,4 @@
    -// Copyright 2023 Google LLC
    +// Copyright 2024 Google LLC
     //
     // Licensed under the Apache License, Version 2.0 (the "License");
     // you may not use this file except in compliance with the License.
    @@ -16,6 +16,7 @@ syntax = "proto3";
     
     package google.spanner.admin.instance.v1;
     
    +import "google/api/field_behavior.proto";
     import "google/protobuf/timestamp.proto";
     
     option csharp_namespace = "Google.Cloud.Spanner.Admin.Instance.V1";
    @@ -54,3 +55,9 @@ enum FulfillmentPeriod {
       // to complete.
       FULFILLMENT_PERIOD_EXTENDED = 2;
     }
    +
    +// ReplicaSelection identifies replicas with common properties.
    +message ReplicaSelection {
    +  // Required. Name of the location of the replicas (e.g., "us-central1").
    +  string location = 1 [(google.api.field_behavior) = REQUIRED];
    +}
    diff --git a/proto-google-cloud-spanner-admin-instance-v1/src/main/proto/google/spanner/admin/instance/v1/spanner_instance_admin.proto b/proto-google-cloud-spanner-admin-instance-v1/src/main/proto/google/spanner/admin/instance/v1/spanner_instance_admin.proto
    index 499bd860363..ece99467a40 100644
    --- a/proto-google-cloud-spanner-admin-instance-v1/src/main/proto/google/spanner/admin/instance/v1/spanner_instance_admin.proto
    +++ b/proto-google-cloud-spanner-admin-instance-v1/src/main/proto/google/spanner/admin/instance/v1/spanner_instance_admin.proto
    @@ -80,38 +80,38 @@ service InstanceAdmin {
         option (google.api.method_signature) = "name";
       }
     
    -  // Creates an instance config and begins preparing it to be used. The
    +  // Creates an instance configuration and begins preparing it to be used. The
       // returned [long-running operation][google.longrunning.Operation]
       // can be used to track the progress of preparing the new
    -  // instance config. The instance config name is assigned by the caller. If the
    -  // named instance config already exists, `CreateInstanceConfig` returns
    -  // `ALREADY_EXISTS`.
    +  // instance configuration. The instance configuration name is assigned by the
    +  // caller. If the named instance configuration already exists,
    +  // `CreateInstanceConfig` returns `ALREADY_EXISTS`.
       //
       // Immediately after the request returns:
       //
    -  //   * The instance config is readable via the API, with all requested
    -  //     attributes. The instance config's
    +  //   * The instance configuration is readable via the API, with all requested
    +  //     attributes. The instance configuration's
       //     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
       //     field is set to true. Its state is `CREATING`.
       //
       // While the operation is pending:
       //
    -  //   * Cancelling the operation renders the instance config immediately
    +  //   * Cancelling the operation renders the instance configuration immediately
       //     unreadable via the API.
       //   * Except for deleting the creating resource, all other attempts to modify
    -  //     the instance config are rejected.
    +  //     the instance configuration are rejected.
       //
       // Upon completion of the returned operation:
       //
       //   * Instances can be created using the instance configuration.
    -  //   * The instance config's
    +  //   * The instance configuration's
       //   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
       //   field becomes false. Its state becomes `READY`.
       //
       // The returned [long-running operation][google.longrunning.Operation] will
       // have a name of the format
       // `/operations/` and can be used to track
    -  // creation of the instance config. The
    +  // creation of the instance configuration. The
       // [metadata][google.longrunning.Operation.metadata] field type is
       // [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
       // The [response][google.longrunning.Operation.response] field type is
    @@ -135,16 +135,16 @@ service InstanceAdmin {
         };
       }
     
    -  // Updates an instance config. The returned
    +  // Updates an instance configuration. The returned
       // [long-running operation][google.longrunning.Operation] can be used to track
    -  // the progress of updating the instance. If the named instance config does
    -  // not exist, returns `NOT_FOUND`.
    +  // the progress of updating the instance. If the named instance configuration
    +  // does not exist, returns `NOT_FOUND`.
       //
    -  // Only user managed configurations can be updated.
    +  // Only user-managed configurations can be updated.
       //
       // Immediately after the request returns:
       //
    -  //   * The instance config's
    +  //   * The instance configuration's
       //     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
       //     field is set to true.
       //
    @@ -154,23 +154,23 @@ service InstanceAdmin {
       //     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
       //     The operation is guaranteed to succeed at undoing all changes, after
       //     which point it terminates with a `CANCELLED` status.
    -  //   * All other attempts to modify the instance config are rejected.
    -  //   * Reading the instance config via the API continues to give the
    +  //   * All other attempts to modify the instance configuration are rejected.
    +  //   * Reading the instance configuration via the API continues to give the
       //     pre-request values.
       //
       // Upon completion of the returned operation:
       //
       //   * Creating instances using the instance configuration uses the new
       //     values.
    -  //   * The instance config's new values are readable via the API.
    -  //   * The instance config's
    +  //   * The new values of the instance configuration are readable via the API.
    +  //   * The instance configuration's
       //   [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
       //   field becomes false.
       //
       // The returned [long-running operation][google.longrunning.Operation] will
       // have a name of the format
       // `/operations/` and can be used to track
    -  // the instance config modification.  The
    +  // the instance configuration modification.  The
       // [metadata][google.longrunning.Operation.metadata] field type is
       // [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
       // The [response][google.longrunning.Operation.response] field type is
    @@ -192,11 +192,11 @@ service InstanceAdmin {
         };
       }
     
    -  // Deletes the instance config. Deletion is only allowed when no
    +  // Deletes the instance configuration. Deletion is only allowed when no
       // instances are using the configuration. If any instances are using
    -  // the config, returns `FAILED_PRECONDITION`.
    +  // the configuration, returns `FAILED_PRECONDITION`.
       //
    -  // Only user managed configurations can be deleted.
    +  // Only user-managed configurations can be deleted.
       //
       // Authorization requires `spanner.instanceConfigs.delete` permission on
       // the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
    @@ -208,9 +208,9 @@ service InstanceAdmin {
         option (google.api.method_signature) = "name";
       }
     
    -  // Lists the user-managed instance config [long-running
    +  // Lists the user-managed instance configuration [long-running
       // operations][google.longrunning.Operation] in the given project. An instance
    -  // config operation has a name of the form
    +  // configuration operation has a name of the form
       // `projects//instanceConfigs//operations/`.
       // The long-running operation
       // [metadata][google.longrunning.Operation.metadata] field type
    @@ -567,6 +567,78 @@ service InstanceAdmin {
         };
         option (google.api.method_signature) = "parent";
       }
    +
    +  // Moves an instance to the target instance configuration. You can use the
    +  // returned [long-running operation][google.longrunning.Operation] to track
    +  // the progress of moving the instance.
    +  //
    +  // `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of
    +  // the following criteria:
    +  //
    +  //   * Is undergoing a move to a different instance configuration
    +  //   * Has backups
    +  //   * Has an ongoing update
    +  //   * Contains any CMEK-enabled databases
    +  //   * Is a free trial instance
    +  //
    +  // While the operation is pending:
    +  //
    +  //   * All other attempts to modify the instance, including changes to its
    +  //     compute capacity, are rejected.
    +  //   * The following database and backup admin operations are rejected:
    +  //
    +  //     * `DatabaseAdmin.CreateDatabase`
    +  //     * `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is
    +  //        specified in the request.)
    +  //     * `DatabaseAdmin.RestoreDatabase`
    +  //     * `DatabaseAdmin.CreateBackup`
    +  //     * `DatabaseAdmin.CopyBackup`
    +  //
    +  //   * Both the source and target instance configurations are subject to
    +  //     hourly compute and storage charges.
    +  //   * The instance might experience higher read-write latencies and a higher
    +  //     transaction abort rate. However, moving an instance doesn't cause any
    +  //     downtime.
    +  //
    +  // The returned [long-running operation][google.longrunning.Operation] has
    +  // a name of the format
    +  // `/operations/` and can be used to track
    +  // the move instance operation. The
    +  // [metadata][google.longrunning.Operation.metadata] field type is
    +  // [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata].
    +  // The [response][google.longrunning.Operation.response] field type is
    +  // [Instance][google.spanner.admin.instance.v1.Instance],
    +  // if successful.
    +  // Cancelling the operation sets its metadata's
    +  // [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time].
    +  // Cancellation is not immediate because it involves moving any data
    +  // previously moved to the target instance configuration back to the original
    +  // instance configuration. You can use this operation to track the progress of
    +  // the cancellation. Upon successful completion of the cancellation, the
    +  // operation terminates with `CANCELLED` status.
    +  //
    +  // If not cancelled, upon completion of the returned operation:
    +  //
    +  //   * The instance successfully moves to the target instance
    +  //     configuration.
    +  //   * You are billed for compute and storage in target instance
    +  //   configuration.
    +  //
    +  // Authorization requires the `spanner.instances.update` permission on
    +  // the resource [instance][google.spanner.admin.instance.v1.Instance].
    +  //
    +  // For more details, see
    +  // [Move an instance](https://cloud.google.com/spanner/docs/move-instance).
    +  rpc MoveInstance(MoveInstanceRequest) returns (google.longrunning.Operation) {
    +    option (google.api.http) = {
    +      post: "/v1/{name=projects/*/instances/*}:move"
    +      body: "*"
    +    };
    +    option (google.longrunning.operation_info) = {
    +      response_type: "google.spanner.admin.instance.v1.MoveInstanceResponse"
    +      metadata_type: "google.spanner.admin.instance.v1.MoveInstanceMetadata"
    +    };
    +  }
     }
     
     message ReplicaInfo {
    @@ -637,29 +709,31 @@ message InstanceConfig {
         USER_MANAGED = 2;
       }
     
    -  // Indicates the current state of the instance config.
    +  // Indicates the current state of the instance configuration.
       enum State {
         // Not specified.
         STATE_UNSPECIFIED = 0;
     
    -    // The instance config is still being created.
    +    // The instance configuration is still being created.
         CREATING = 1;
     
    -    // The instance config is fully created and ready to be used to create
    -    // instances.
    +    // The instance configuration is fully created and ready to be used to
    +    // create instances.
         READY = 2;
       }
     
       // A unique identifier for the instance configuration.  Values
       // are of the form
       // `projects//instanceConfigs/[a-z][-a-z0-9]*`.
    +  //
    +  // User instance configuration must start with `custom-`.
       string name = 1;
     
       // The name of this instance configuration as it appears in UIs.
       string display_name = 2;
     
    -  // Output only. Whether this instance config is a Google or User Managed
    -  // Configuration.
    +  // Output only. Whether this instance configuration is a Google-managed or
    +  // user-managed configuration.
       Type config_type = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
     
       // The geographic placement of nodes in this instance configuration and their
    @@ -703,30 +777,61 @@ message InstanceConfig {
       map labels = 8;
     
       // etag is used for optimistic concurrency control as a way
    -  // to help prevent simultaneous updates of a instance config from overwriting
    -  // each other. It is strongly suggested that systems make use of the etag in
    -  // the read-modify-write cycle to perform instance config updates in order to
    -  // avoid race conditions: An etag is returned in the response which contains
    -  // instance configs, and systems are expected to put that etag in the request
    -  // to update instance config to ensure that their change will be applied to
    -  // the same version of the instance config.
    -  // If no etag is provided in the call to update instance config, then the
    -  // existing instance config is overwritten blindly.
    +  // to help prevent simultaneous updates of a instance configuration from
    +  // overwriting each other. It is strongly suggested that systems make use of
    +  // the etag in the read-modify-write cycle to perform instance configuration
    +  // updates in order to avoid race conditions: An etag is returned in the
    +  // response which contains instance configurations, and systems are expected
    +  // to put that etag in the request to update instance configuration to ensure
    +  // that their change is applied to the same version of the instance
    +  // configuration. If no etag is provided in the call to update the instance
    +  // configuration, then the existing instance configuration is overwritten
    +  // blindly.
       string etag = 9;
     
       // Allowed values of the "default_leader" schema option for databases in
       // instances that use this instance configuration.
       repeated string leader_options = 4;
     
    -  // Output only. If true, the instance config is being created or updated. If
    -  // false, there are no ongoing operations for the instance config.
    +  // Output only. If true, the instance configuration is being created or
    +  // updated. If false, there are no ongoing operations for the instance
    +  // configuration.
       bool reconciling = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
     
    -  // Output only. The current instance config state.
    +  // Output only. The current instance configuration state. Applicable only for
    +  // `USER_MANAGED` configurations.
       State state = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
     }
     
    -// Autoscaling config for an instance.
    +// ReplicaComputeCapacity describes the amount of server resources that are
    +// allocated to each replica identified by the replica selection.
    +message ReplicaComputeCapacity {
    +  // Required. Identifies replicas by specified properties.
    +  // All replicas in the selection have the same amount of compute capacity.
    +  ReplicaSelection replica_selection = 1
    +      [(google.api.field_behavior) = REQUIRED];
    +
    +  // Compute capacity allocated to each replica identified by the specified
    +  // selection.
    +  // The unit is selected based on the unit used to specify the instance size
    +  // for non-autoscaling instances, or the unit used in autoscaling limit for
    +  // autoscaling instances.
    +  oneof compute_capacity {
    +    // The number of nodes allocated to each replica.
    +    //
    +    // This may be zero in API responses for instances that are not yet in
    +    // state `READY`.
    +    int32 node_count = 2;
    +
    +    // The number of processing units allocated to each replica.
    +    //
    +    // This may be zero in API responses for instances that are not yet in
    +    // state `READY`.
    +    int32 processing_units = 3;
    +  }
    +}
    +
    +// Autoscaling configuration for an instance.
     message AutoscalingConfig {
       // The autoscaling limits for the instance. Users can define the minimum and
       // maximum compute capacity allocated to the instance, and the autoscaler will
    @@ -777,6 +882,37 @@ message AutoscalingConfig {
             [(google.api.field_behavior) = REQUIRED];
       }
     
    +  // AsymmetricAutoscalingOption specifies the scaling of replicas identified by
    +  // the given selection.
    +  message AsymmetricAutoscalingOption {
    +    // Overrides the top-level autoscaling configuration for the replicas
    +    // identified by `replica_selection`. All fields in this message are
    +    // optional. Any unspecified fields will use the corresponding values from
    +    // the top-level autoscaling configuration.
    +    message AutoscalingConfigOverrides {
    +      // Optional. If specified, overrides the min/max limit in the top-level
    +      // autoscaling configuration for the selected replicas.
    +      AutoscalingLimits autoscaling_limits = 1
    +          [(google.api.field_behavior) = OPTIONAL];
    +
    +      // Optional. If specified, overrides the autoscaling target
    +      // high_priority_cpu_utilization_percent in the top-level autoscaling
    +      // configuration for the selected replicas.
    +      int32 autoscaling_target_high_priority_cpu_utilization_percent = 2
    +          [(google.api.field_behavior) = OPTIONAL];
    +    }
    +
    +    // Required. Selects the replicas to which this AsymmetricAutoscalingOption
    +    // applies. Only read-only replicas are supported.
    +    ReplicaSelection replica_selection = 1
    +        [(google.api.field_behavior) = REQUIRED];
    +
    +    // Optional. Overrides applied to the top-level autoscaling configuration
    +    // for the selected replicas.
    +    AutoscalingConfigOverrides overrides = 2
    +        [(google.api.field_behavior) = OPTIONAL];
    +  }
    +
       // Required. Autoscaling limits for an instance.
       AutoscalingLimits autoscaling_limits = 1
           [(google.api.field_behavior) = REQUIRED];
    @@ -784,6 +920,18 @@ message AutoscalingConfig {
       // Required. The autoscaling targets for an instance.
       AutoscalingTargets autoscaling_targets = 2
           [(google.api.field_behavior) = REQUIRED];
    +
    +  // Optional. Optional asymmetric autoscaling options.
    +  // Replicas matching the replica selection criteria will be autoscaled
    +  // independently from other replicas. The autoscaler will scale the replicas
    +  // based on the utilization of replicas identified by the replica selection.
    +  // Replica selections should not overlap with each other.
    +  //
    +  // Other replicas (those do not match any replica selection) will be
    +  // autoscaled together and will have the same compute capacity allocated to
    +  // them.
    +  repeated AsymmetricAutoscalingOption asymmetric_autoscaling_options = 3
    +      [(google.api.field_behavior) = OPTIONAL];
     }
     
     // An isolated set of Cloud Spanner resources on which databases can be hosted.
    @@ -808,6 +956,22 @@ message Instance {
         READY = 2;
       }
     
    +  // The edition selected for this instance. Different editions provide
    +  // different capabilities at different price points.
    +  enum Edition {
    +    // Edition not specified.
    +    EDITION_UNSPECIFIED = 0;
    +
    +    // Standard edition.
    +    STANDARD = 1;
    +
    +    // Enterprise edition.
    +    ENTERPRISE = 2;
    +
    +    // Enterprise Plus edition.
    +    ENTERPRISE_PLUS = 3;
    +  }
    +
       // Required. A unique identifier for the instance, which cannot be changed
       // after the instance is created. Values are of the form
       // `projects//instances/[a-z][-a-z0-9]*[a-z0-9]`. The final
    @@ -829,34 +993,56 @@ message Instance {
       // Must be unique per project and between 4 and 30 characters in length.
       string display_name = 3 [(google.api.field_behavior) = REQUIRED];
     
    -  // The number of nodes allocated to this instance. At most one of either
    -  // node_count or processing_units should be present in the message.
    +  // The number of nodes allocated to this instance. At most, one of either
    +  // `node_count` or `processing_units` should be present in the message.
       //
    -  // Users can set the node_count field to specify the target number of nodes
    +  // Users can set the `node_count` field to specify the target number of nodes
       // allocated to the instance.
       //
    -  // This may be zero in API responses for instances that are not yet in state
    -  // `READY`.
    +  // If autoscaling is enabled, `node_count` is treated as an `OUTPUT_ONLY`
    +  // field and reflects the current number of nodes allocated to the instance.
       //
    -  // See [the
    -  // documentation](https://cloud.google.com/spanner/docs/compute-capacity)
    -  // for more information about nodes and processing units.
    +  // This might be zero in API responses for instances that are not yet in the
    +  // `READY` state.
    +  //
    +  // If the instance has varying node count across replicas (achieved by
    +  // setting asymmetric_autoscaling_options in autoscaling config), the
    +  // node_count here is the maximum node count across all replicas.
    +  //
    +  // For more information, see
    +  // [Compute capacity, nodes, and processing
    +  // units](https://cloud.google.com/spanner/docs/compute-capacity).
       int32 node_count = 5;
     
    -  // The number of processing units allocated to this instance. At most one of
    -  // processing_units or node_count should be present in the message.
    +  // The number of processing units allocated to this instance. At most, one of
    +  // either `processing_units` or `node_count` should be present in the message.
       //
    -  // Users can set the processing_units field to specify the target number of
    +  // Users can set the `processing_units` field to specify the target number of
       // processing units allocated to the instance.
       //
    -  // This may be zero in API responses for instances that are not yet in state
    -  // `READY`.
    +  // If autoscaling is enabled, `processing_units` is treated as an
    +  // `OUTPUT_ONLY` field and reflects the current number of processing units
    +  // allocated to the instance.
       //
    -  // See [the
    -  // documentation](https://cloud.google.com/spanner/docs/compute-capacity)
    -  // for more information about nodes and processing units.
    +  // This might be zero in API responses for instances that are not yet in the
    +  // `READY` state.
    +  //
    +  // If the instance has varying processing units per replica
    +  // (achieved by setting asymmetric_autoscaling_options in autoscaling config),
    +  // the processing_units here is the maximum processing units across all
    +  // replicas.
    +  //
    +  // For more information, see
    +  // [Compute capacity, nodes and processing
    +  // units](https://cloud.google.com/spanner/docs/compute-capacity).
       int32 processing_units = 9;
     
    +  // Output only. Lists the compute capacity per ReplicaSelection. A replica
    +  // selection identifies a set of replicas with common properties. Replicas
    +  // identified by a ReplicaSelection are scaled with the same compute capacity.
    +  repeated ReplicaComputeCapacity replica_compute_capacity = 19
    +      [(google.api.field_behavior) = OUTPUT_ONLY];
    +
       // Optional. The autoscaling configuration. Autoscaling is enabled if this
       // field is set. When autoscaling is enabled, node_count and processing_units
       // are treated as OUTPUT_ONLY fields and reflect the current compute capacity
    @@ -904,6 +1090,9 @@ message Instance {
       // Output only. The time at which the instance was most recently updated.
       google.protobuf.Timestamp update_time = 12
           [(google.api.field_behavior) = OUTPUT_ONLY];
    +
    +  // Optional. The `Edition` of the current instance.
    +  Edition edition = 20 [(google.api.field_behavior) = OPTIONAL];
     }
     
     // The request for
    @@ -958,8 +1147,8 @@ message GetInstanceConfigRequest {
     // The request for
     // [CreateInstanceConfigRequest][InstanceAdmin.CreateInstanceConfigRequest].
     message CreateInstanceConfigRequest {
    -  // Required. The name of the project in which to create the instance config.
    -  // Values are of the form `projects/`.
    +  // Required. The name of the project in which to create the instance
    +  // configuration. Values are of the form `projects/`.
       string parent = 1 [
         (google.api.field_behavior) = REQUIRED,
         (google.api.resource_reference) = {
    @@ -967,10 +1156,10 @@ message CreateInstanceConfigRequest {
         }
       ];
     
    -  // Required. The ID of the instance config to create.  Valid identifiers are
    -  // of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
    +  // Required. The ID of the instance configuration to create. Valid identifiers
    +  // are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
       // characters in length. The `custom-` prefix is required to avoid name
    -  // conflicts with Google managed configurations.
    +  // conflicts with Google-managed configurations.
       string instance_config_id = 2 [(google.api.field_behavior) = REQUIRED];
     
       // Required. The InstanceConfig proto of the configuration to create.
    @@ -988,8 +1177,9 @@ message CreateInstanceConfigRequest {
     // The request for
     // [UpdateInstanceConfigRequest][InstanceAdmin.UpdateInstanceConfigRequest].
     message UpdateInstanceConfigRequest {
    -  // Required. The user instance config to update, which must always include the
    -  // instance config name. Otherwise, only fields mentioned in
    +  // Required. The user instance configuration to update, which must always
    +  // include the instance configuration name. Otherwise, only fields mentioned
    +  // in
       // [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
       // need be included. To prevent conflicts of concurrent updates,
       // [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
    @@ -1024,12 +1214,12 @@ message DeleteInstanceConfigRequest {
       ];
     
       // Used for optimistic concurrency control as a way to help prevent
    -  // simultaneous deletes of an instance config from overwriting each
    +  // simultaneous deletes of an instance configuration from overwriting each
       // other. If not empty, the API
    -  // only deletes the instance config when the etag provided matches the current
    -  // status of the requested instance config. Otherwise, deletes the instance
    -  // config without checking the current status of the requested instance
    -  // config.
    +  // only deletes the instance configuration when the etag provided matches the
    +  // current status of the requested instance configuration. Otherwise, deletes
    +  // the instance configuration without checking the current status of the
    +  // requested instance configuration.
       string etag = 2;
     
       // An option to validate, but not actually execute, a request,
    @@ -1040,7 +1230,7 @@ message DeleteInstanceConfigRequest {
     // The request for
     // [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations].
     message ListInstanceConfigOperationsRequest {
    -  // Required. The project of the instance config operations.
    +  // Required. The project of the instance configuration operations.
       // Values are of the form `projects/`.
       string parent = 1 [
         (google.api.field_behavior) = REQUIRED,
    @@ -1089,7 +1279,7 @@ message ListInstanceConfigOperationsRequest {
       //     `(error:*)` - Return operations where:
       //     * The operation's metadata type is
       //     [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
    -  //     * The instance config name contains "custom-config".
    +  //     * The instance configuration name contains "custom-config".
       //     * The operation started before 2021-03-28T14:50:00Z.
       //     * The operation resulted in an error.
       string filter = 2;
    @@ -1109,9 +1299,9 @@ message ListInstanceConfigOperationsRequest {
     // The response for
     // [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations].
     message ListInstanceConfigOperationsResponse {
    -  // The list of matching instance config [long-running
    +  // The list of matching instance configuration [long-running
       // operations][google.longrunning.Operation]. Each operation's name will be
    -  // prefixed by the instance config's name. The operation's
    +  // prefixed by the name of the instance configuration. The operation's
       // [metadata][google.longrunning.Operation.metadata] field type
       // `metadata.type_url` describes the type of the metadata.
       repeated google.longrunning.Operation operations = 1;
    @@ -1313,7 +1503,7 @@ message UpdateInstanceMetadata {
     // Metadata type for the operation returned by
     // [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig].
     message CreateInstanceConfigMetadata {
    -  // The target instance config end state.
    +  // The target instance configuration end state.
       InstanceConfig instance_config = 1;
     
       // The progress of the
    @@ -1328,7 +1518,7 @@ message CreateInstanceConfigMetadata {
     // Metadata type for the operation returned by
     // [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig].
     message UpdateInstanceConfigMetadata {
    -  // The desired instance config after updating.
    +  // The desired instance configuration after updating.
       InstanceConfig instance_config = 1;
     
       // The progress of the
    @@ -1717,3 +1907,47 @@ message ListInstancePartitionOperationsResponse {
       // [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
       repeated string unreachable_instance_partitions = 3;
     }
    +
    +// The request for
    +// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
    +message MoveInstanceRequest {
    +  // Required. The instance to move.
    +  // Values are of the form `projects//instances/`.
    +  string name = 1 [
    +    (google.api.field_behavior) = REQUIRED,
    +    (google.api.resource_reference) = {
    +      type: "spanner.googleapis.com/Instance"
    +    }
    +  ];
    +
    +  // Required. The target instance configuration where to move the instance.
    +  // Values are of the form `projects//instanceConfigs/`.
    +  string target_config = 2 [
    +    (google.api.field_behavior) = REQUIRED,
    +    (google.api.resource_reference) = {
    +      type: "spanner.googleapis.com/InstanceConfig"
    +    }
    +  ];
    +}
    +
    +// The response for
    +// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
    +message MoveInstanceResponse {}
    +
    +// Metadata type for the operation returned by
    +// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
    +message MoveInstanceMetadata {
    +  // The target instance configuration where to move the instance.
    +  // Values are of the form `projects//instanceConfigs/`.
    +  string target_config = 1;
    +
    +  // The progress of the
    +  // [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
    +  // operation.
    +  // [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
    +  // is reset when cancellation is requested.
    +  OperationProgress progress = 2;
    +
    +  // The time at which this operation was cancelled.
    +  google.protobuf.Timestamp cancel_time = 3;
    +}
    diff --git a/proto-google-cloud-spanner-executor-v1/clirr-ignored-differences.xml b/proto-google-cloud-spanner-executor-v1/clirr-ignored-differences.xml
    index cfd1e566a20..c8787595be2 100644
    --- a/proto-google-cloud-spanner-executor-v1/clirr-ignored-differences.xml
    +++ b/proto-google-cloud-spanner-executor-v1/clirr-ignored-differences.xml
    @@ -36,4 +36,66 @@
         8001
         com/google/spanner/executor/v1/SpannerExecutorProxyGrpc$SpannerExecutorProxyStub
       
    +
    +  
    +  
    +    7006
    +    com/google/spanner/executor/v1/**
    +    * getDefaultInstanceForType()
    +    **
    +  
    +  
    +    7006
    +    com/google/spanner/executor/v1/**
    +    * addRepeatedField(*)
    +    **
    +  
    +  
    +    7006
    +    com/google/spanner/executor/v1/**
    +    * clear()
    +    **
    +  
    +  
    +    7006
    +    com/google/spanner/executor/v1/**
    +    * clearField(*)
    +    **
    +  
    +  
    +    7006
    +    com/google/spanner/executor/v1/**
    +    * clearOneof(*)
    +    **
    +  
    +  
    +    7006
    +    com/google/spanner/executor/v1/**
    +    * clone()
    +    **
    +  
    +  
    +    7006
    +    com/google/spanner/executor/v1/**
    +    * mergeUnknownFields(*)
    +    **
    +  
    +  
    +    7006
    +    com/google/spanner/executor/v1/**
    +    * setField(*)
    +    **
    +  
    +  
    +    7006
    +    com/google/spanner/executor/v1/**
    +    * setRepeatedField(*)
    +    **
    +  
    +  
    +    7006
    +    com/google/spanner/executor/v1/**
    +    * setUnknownFields(*)
    +    **
    +  
     
    diff --git a/proto-google-cloud-spanner-executor-v1/pom.xml b/proto-google-cloud-spanner-executor-v1/pom.xml
    index db188e15c13..15dc11cabdb 100644
    --- a/proto-google-cloud-spanner-executor-v1/pom.xml
    +++ b/proto-google-cloud-spanner-executor-v1/pom.xml
    @@ -4,13 +4,13 @@
       4.0.0
       com.google.api.grpc
       proto-google-cloud-spanner-executor-v1
    -  6.66.1-SNAPSHOT
    +  6.78.1-SNAPSHOT
       proto-google-cloud-spanner-executor-v1
       Proto library for google-cloud-spanner
       
         com.google.cloud
         google-cloud-spanner-parent
    -    6.66.1-SNAPSHOT
    +    6.78.1-SNAPSHOT
       
       
         
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminAction.java
    index 35595d17983..2e8b4650264 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminActionOrBuilder.java
    index dbb35d398ac..fc8153bda20 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface AdminActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminResult.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminResult.java
    index 2f189229753..00ad983952c 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminResult.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminResult.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminResultOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminResultOrBuilder.java
    index d7595a7f88e..dc334613ce2 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminResultOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/AdminResultOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface AdminResultOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchDmlAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchDmlAction.java
    index 2a1257d6b41..e80d6ed752f 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchDmlAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchDmlAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchDmlActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchDmlActionOrBuilder.java
    index ee256eb0031..9df64620fef 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchDmlActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchDmlActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface BatchDmlActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchPartition.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchPartition.java
    index 2f219fa5d71..d63f9a2ee3c 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchPartition.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchPartition.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchPartitionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchPartitionOrBuilder.java
    index 51f58412545..2989a520696 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchPartitionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/BatchPartitionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface BatchPartitionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CancelOperationAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CancelOperationAction.java
    index 75bca030819..c3ccec7cee9 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CancelOperationAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CancelOperationAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CancelOperationActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CancelOperationActionOrBuilder.java
    index 16b64b93231..b523d4a24d5 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CancelOperationActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CancelOperationActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface CancelOperationActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeQuorumCloudDatabaseAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeQuorumCloudDatabaseAction.java
    index 26d576ac9dc..f9c8a99d4f2 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeQuorumCloudDatabaseAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeQuorumCloudDatabaseAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeQuorumCloudDatabaseActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeQuorumCloudDatabaseActionOrBuilder.java
    index de807b718da..373ac4a663b 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeQuorumCloudDatabaseActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeQuorumCloudDatabaseActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface ChangeQuorumCloudDatabaseActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeStreamRecord.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeStreamRecord.java
    index d60357f100c..edbe5874f5f 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeStreamRecord.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeStreamRecord.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeStreamRecordOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeStreamRecordOrBuilder.java
    index 8abc8e9561a..a66cc1546ff 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeStreamRecordOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChangeStreamRecordOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface ChangeStreamRecordOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChildPartitionsRecord.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChildPartitionsRecord.java
    index fa3c5d1ffdb..98d25aa0f56 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChildPartitionsRecord.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChildPartitionsRecord.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChildPartitionsRecordOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChildPartitionsRecordOrBuilder.java
    index a1cc7f0274f..9276a82690a 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChildPartitionsRecordOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ChildPartitionsRecordOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface ChildPartitionsRecordOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloseBatchTransactionAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloseBatchTransactionAction.java
    index 62ca0bc2a4d..61428cbc5ab 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloseBatchTransactionAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloseBatchTransactionAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloseBatchTransactionActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloseBatchTransactionActionOrBuilder.java
    index 7668ae83cf0..882a1d58afa 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloseBatchTransactionActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloseBatchTransactionActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface CloseBatchTransactionActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudBackupResponse.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudBackupResponse.java
    index 24a1c3d1117..8704af5e5a8 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudBackupResponse.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudBackupResponse.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudBackupResponseOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudBackupResponseOrBuilder.java
    index 6dc3ef0c947..2b1da9bfa5f 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudBackupResponseOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudBackupResponseOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface CloudBackupResponseOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudDatabaseResponse.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudDatabaseResponse.java
    index 718f29df6df..7247bc811ff 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudDatabaseResponse.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudDatabaseResponse.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudDatabaseResponseOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudDatabaseResponseOrBuilder.java
    index cabcd93ca27..b9e880893b5 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudDatabaseResponseOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudDatabaseResponseOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface CloudDatabaseResponseOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudExecutorProto.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudExecutorProto.java
    index dceb9a3291a..39feaa1bc0f 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudExecutorProto.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudExecutorProto.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public final class CloudExecutorProto {
    @@ -252,6 +252,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r
           internal_static_google_spanner_executor_v1_GetOperationAction_descriptor;
       static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
           internal_static_google_spanner_executor_v1_GetOperationAction_fieldAccessorTable;
    +  static final com.google.protobuf.Descriptors.Descriptor
    +      internal_static_google_spanner_executor_v1_QueryCancellationAction_descriptor;
    +  static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
    +      internal_static_google_spanner_executor_v1_QueryCancellationAction_fieldAccessorTable;
       static final com.google.protobuf.Descriptors.Descriptor
           internal_static_google_spanner_executor_v1_CancelOperationAction_descriptor;
       static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
    @@ -383,7 +387,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
               + "tor.v1.SpannerAction\"r\n\032SpannerAsyncActi"
               + "onResponse\022\021\n\taction_id\030\001 \001(\005\022A\n\007outcome"
               + "\030\002 \001(\01320.google.spanner.executor.v1.Span"
    -          + "nerActionOutcome\"\236\n\n\rSpannerAction\022\025\n\rda"
    +          + "nerActionOutcome\"\361\n\n\rSpannerAction\022\025\n\rda"
               + "tabase_path\030\001 \001(\t\022C\n\017spanner_options\030\002 \001"
               + "(\0132*.google.spanner.executor.v1.SpannerO"
               + "ptions\022C\n\005start\030\n \001(\01322.google.spanner.e"
    @@ -415,407 +419,411 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
               + "tition\030, \001(\01322.google.spanner.executor.v"
               + "1.ExecutePartitionActionH\000\022[\n\033execute_ch"
               + "ange_stream_query\0302 \001(\01324.google.spanner"
    -          + ".executor.v1.ExecuteChangeStreamQueryH\000B"
    -          + "\010\n\006action\"\212\001\n\nReadAction\022\r\n\005table\030\001 \001(\t\022"
    -          + "\022\n\005index\030\002 \001(\tH\000\210\001\001\022\016\n\006column\030\003 \003(\t\0220\n\004k"
    -          + "eys\030\004 \001(\0132\".google.spanner.executor.v1.K"
    -          + "eySet\022\r\n\005limit\030\005 \001(\005B\010\n\006_index\"\321\001\n\013Query"
    -          + "Action\022\013\n\003sql\030\001 \001(\t\022A\n\006params\030\002 \003(\01321.go"
    -          + "ogle.spanner.executor.v1.QueryAction.Par"
    -          + "ameter\032r\n\tParameter\022\014\n\004name\030\001 \001(\t\022%\n\004typ"
    -          + "e\030\002 \001(\0132\027.google.spanner.v1.Type\0220\n\005valu"
    -          + "e\030\003 \001(\0132!.google.spanner.executor.v1.Val"
    -          + "ue\"\206\001\n\tDmlAction\0227\n\006update\030\001 \001(\0132\'.googl"
    -          + "e.spanner.executor.v1.QueryAction\022$\n\027aut"
    -          + "ocommit_if_supported\030\002 \001(\010H\000\210\001\001B\032\n\030_auto"
    -          + "commit_if_supported\"J\n\016BatchDmlAction\0228\n"
    -          + "\007updates\030\001 \003(\0132\'.google.spanner.executor"
    -          + ".v1.QueryAction\"\311\003\n\005Value\022\021\n\007is_null\030\001 \001"
    -          + "(\010H\000\022\023\n\tint_value\030\002 \001(\003H\000\022\024\n\nbool_value\030"
    -          + "\003 \001(\010H\000\022\026\n\014double_value\030\004 \001(\001H\000\022\025\n\013bytes"
    -          + "_value\030\005 \001(\014H\000\022\026\n\014string_value\030\006 \001(\tH\000\022="
    -          + "\n\014struct_value\030\007 \001(\0132%.google.spanner.ex"
    -          + "ecutor.v1.ValueListH\000\0225\n\017timestamp_value"
    -          + "\030\010 \001(\0132\032.google.protobuf.TimestampH\000\022\031\n\017"
    -          + "date_days_value\030\t \001(\005H\000\022\035\n\023is_commit_tim"
    -          + "estamp\030\n \001(\010H\000\022<\n\013array_value\030\013 \001(\0132%.go"
    -          + "ogle.spanner.executor.v1.ValueListH\000\0220\n\n"
    -          + "array_type\030\014 \001(\0132\027.google.spanner.v1.Typ"
    -          + "eH\001\210\001\001B\014\n\nvalue_typeB\r\n\013_array_type\"\237\002\n\010"
    -          + "KeyRange\0224\n\005start\030\001 \001(\0132%.google.spanner"
    -          + ".executor.v1.ValueList\0224\n\005limit\030\002 \001(\0132%."
    -          + "google.spanner.executor.v1.ValueList\022<\n\004"
    -          + "type\030\003 \001(\0162).google.spanner.executor.v1."
    -          + "KeyRange.TypeH\000\210\001\001\"`\n\004Type\022\024\n\020TYPE_UNSPE"
    -          + "CIFIED\020\000\022\021\n\rCLOSED_CLOSED\020\001\022\017\n\013CLOSED_OP"
    -          + "EN\020\002\022\017\n\013OPEN_CLOSED\020\003\022\r\n\tOPEN_OPEN\020\004B\007\n\005"
    -          + "_type\"\200\001\n\006KeySet\0224\n\005point\030\001 \003(\0132%.google"
    -          + ".spanner.executor.v1.ValueList\0223\n\005range\030"
    -          + "\002 \003(\0132$.google.spanner.executor.v1.KeyRa"
    -          + "nge\022\013\n\003all\030\003 \001(\010\"=\n\tValueList\0220\n\005value\030\001"
    -          + " \003(\0132!.google.spanner.executor.v1.Value\""
    -          + "\274\005\n\016MutationAction\022;\n\003mod\030\001 \003(\0132..google"
    -          + ".spanner.executor.v1.MutationAction.Mod\032"
    -          + "z\n\nInsertArgs\022\016\n\006column\030\001 \003(\t\022%\n\004type\030\002 "
    -          + "\003(\0132\027.google.spanner.v1.Type\0225\n\006values\030\003"
    -          + " \003(\0132%.google.spanner.executor.v1.ValueL"
    -          + "ist\032z\n\nUpdateArgs\022\016\n\006column\030\001 \003(\t\022%\n\004typ"
    -          + "e\030\002 \003(\0132\027.google.spanner.v1.Type\0225\n\006valu"
    -          + "es\030\003 \003(\0132%.google.spanner.executor.v1.Va"
    -          + "lueList\032\364\002\n\003Mod\022\r\n\005table\030\001 \001(\t\022E\n\006insert"
    -          + "\030\002 \001(\01325.google.spanner.executor.v1.Muta"
    -          + "tionAction.InsertArgs\022E\n\006update\030\003 \001(\01325."
    -          + "google.spanner.executor.v1.MutationActio"
    -          + "n.UpdateArgs\022O\n\020insert_or_update\030\004 \001(\01325"
    -          + ".google.spanner.executor.v1.MutationActi"
    -          + "on.InsertArgs\022F\n\007replace\030\005 \001(\01325.google."
    -          + "spanner.executor.v1.MutationAction.Inser"
    -          + "tArgs\0227\n\013delete_keys\030\006 \001(\0132\".google.span"
    -          + "ner.executor.v1.KeySet\"T\n\024WriteMutations"
    -          + "Action\022<\n\010mutation\030\001 \001(\0132*.google.spanne"
    -          + "r.executor.v1.MutationAction\"\337\002\n\027Partiti"
    -          + "onedUpdateAction\022i\n\007options\030\001 \001(\0132S.goog"
    -          + "le.spanner.executor.v1.PartitionedUpdate"
    -          + "Action.ExecutePartitionedUpdateOptionsH\000"
    -          + "\210\001\001\0227\n\006update\030\002 \001(\0132\'.google.spanner.exe"
    -          + "cutor.v1.QueryAction\032\223\001\n\037ExecutePartitio"
    -          + "nedUpdateOptions\022E\n\014rpc_priority\030\001 \001(\0162*"
    -          + ".google.spanner.v1.RequestOptions.Priori"
    -          + "tyH\000\210\001\001\022\020\n\003tag\030\002 \001(\tH\001\210\001\001B\017\n\r_rpc_priori"
    -          + "tyB\006\n\004_tagB\n\n\010_options\"\256\002\n\026StartTransact"
    -          + "ionAction\022A\n\013concurrency\030\001 \001(\0132\'.google."
    -          + "spanner.executor.v1.ConcurrencyH\000\210\001\001\0228\n\005"
    -          + "table\030\002 \003(\0132).google.spanner.executor.v1"
    -          + ".TableMetadata\022\030\n\020transaction_seed\030\003 \001(\t"
    -          + "\022W\n\021execution_options\030\004 \001(\01327.google.spa"
    -          + "nner.executor.v1.TransactionExecutionOpt"
    -          + "ionsH\001\210\001\001B\016\n\014_concurrencyB\024\n\022_execution_"
    -          + "options\"\256\002\n\013Concurrency\022\033\n\021staleness_sec"
    -          + "onds\030\001 \001(\001H\000\022#\n\031min_read_timestamp_micro"
    -          + "s\030\002 \001(\003H\000\022\037\n\025max_staleness_seconds\030\003 \001(\001"
    -          + "H\000\022 \n\026exact_timestamp_micros\030\004 \001(\003H\000\022\020\n\006"
    -          + "strong\030\005 \001(\010H\000\022\017\n\005batch\030\006 \001(\010H\000\022\033\n\023snaps"
    -          + "hot_epoch_read\030\007 \001(\010\022!\n\031snapshot_epoch_r"
    -          + "oot_table\030\010 \001(\t\022#\n\033batch_read_timestamp_"
    -          + "micros\030\t \001(\003B\022\n\020concurrency_mode\"\231\001\n\rTab"
    -          + "leMetadata\022\014\n\004name\030\001 \001(\t\022:\n\006column\030\002 \003(\013"
    -          + "2*.google.spanner.executor.v1.ColumnMeta"
    -          + "data\022>\n\nkey_column\030\003 \003(\0132*.google.spanne"
    -          + "r.executor.v1.ColumnMetadata\"E\n\016ColumnMe"
    -          + "tadata\022\014\n\004name\030\001 \001(\t\022%\n\004type\030\002 \001(\0132\027.goo"
    -          + "gle.spanner.v1.Type\"1\n\033TransactionExecut"
    -          + "ionOptions\022\022\n\noptimistic\030\001 \001(\010\"\230\001\n\027Finis"
    -          + "hTransactionAction\022F\n\004mode\030\001 \001(\01628.googl"
    -          + "e.spanner.executor.v1.FinishTransactionA"
    -          + "ction.Mode\"5\n\004Mode\022\024\n\020MODE_UNSPECIFIED\020\000"
    -          + "\022\n\n\006COMMIT\020\001\022\013\n\007ABANDON\020\002\"\310\023\n\013AdminActio"
    -          + "n\022a\n\033create_user_instance_config\030\001 \001(\0132:"
    -          + ".google.spanner.executor.v1.CreateUserIn"
    -          + "stanceConfigActionH\000\022a\n\033update_user_inst"
    -          + "ance_config\030\002 \001(\0132:.google.spanner.execu"
    -          + "tor.v1.UpdateUserInstanceConfigActionH\000\022"
    -          + "a\n\033delete_user_instance_config\030\003 \001(\0132:.g"
    -          + "oogle.spanner.executor.v1.DeleteUserInst"
    -          + "anceConfigActionH\000\022]\n\031get_cloud_instance"
    -          + "_config\030\004 \001(\01328.google.spanner.executor."
    -          + "v1.GetCloudInstanceConfigActionH\000\022[\n\025lis"
    -          + "t_instance_configs\030\005 \001(\0132:.google.spanne"
    -          + "r.executor.v1.ListCloudInstanceConfigsAc"
    -          + "tionH\000\022V\n\025create_cloud_instance\030\006 \001(\01325."
    -          + "google.spanner.executor.v1.CreateCloudIn"
    -          + "stanceActionH\000\022V\n\025update_cloud_instance\030"
    -          + "\007 \001(\01325.google.spanner.executor.v1.Updat"
    -          + "eCloudInstanceActionH\000\022V\n\025delete_cloud_i"
    -          + "nstance\030\010 \001(\01325.google.spanner.executor."
    -          + "v1.DeleteCloudInstanceActionH\000\022T\n\024list_c"
    -          + "loud_instances\030\t \001(\01324.google.spanner.ex"
    -          + "ecutor.v1.ListCloudInstancesActionH\000\022P\n\022"
    -          + "get_cloud_instance\030\n \001(\01322.google.spanne"
    -          + "r.executor.v1.GetCloudInstanceActionH\000\022V"
    -          + "\n\025create_cloud_database\030\013 \001(\01325.google.s"
    -          + "panner.executor.v1.CreateCloudDatabaseAc"
    -          + "tionH\000\022]\n\031update_cloud_database_ddl\030\014 \001("
    -          + "\01328.google.spanner.executor.v1.UpdateClo"
    -          + "udDatabaseDdlActionH\000\022V\n\025update_cloud_da"
    -          + "tabase\030\033 \001(\01325.google.spanner.executor.v"
    -          + "1.UpdateCloudDatabaseActionH\000\022R\n\023drop_cl"
    -          + "oud_database\030\r \001(\01323.google.spanner.exec"
    -          + "utor.v1.DropCloudDatabaseActionH\000\022T\n\024lis"
    -          + "t_cloud_databases\030\016 \001(\01324.google.spanner"
    -          + ".executor.v1.ListCloudDatabasesActionH\000\022"
    -          + "g\n\036list_cloud_database_operations\030\017 \001(\0132"
    -          + "=.google.spanner.executor.v1.ListCloudDa"
    -          + "tabaseOperationsActionH\000\022X\n\026restore_clou"
    -          + "d_database\030\020 \001(\01326.google.spanner.execut"
    -          + "or.v1.RestoreCloudDatabaseActionH\000\022P\n\022ge"
    -          + "t_cloud_database\030\021 \001(\01322.google.spanner."
    -          + "executor.v1.GetCloudDatabaseActionH\000\022R\n\023"
    -          + "create_cloud_backup\030\022 \001(\01323.google.spann"
    -          + "er.executor.v1.CreateCloudBackupActionH\000"
    -          + "\022N\n\021copy_cloud_backup\030\023 \001(\01321.google.spa"
    -          + "nner.executor.v1.CopyCloudBackupActionH\000"
    -          + "\022L\n\020get_cloud_backup\030\024 \001(\01320.google.span"
    -          + "ner.executor.v1.GetCloudBackupActionH\000\022R"
    -          + "\n\023update_cloud_backup\030\025 \001(\01323.google.spa"
    -          + "nner.executor.v1.UpdateCloudBackupAction"
    -          + "H\000\022R\n\023delete_cloud_backup\030\026 \001(\01323.google"
    -          + ".spanner.executor.v1.DeleteCloudBackupAc"
    -          + "tionH\000\022P\n\022list_cloud_backups\030\027 \001(\01322.goo"
    -          + "gle.spanner.executor.v1.ListCloudBackups"
    -          + "ActionH\000\022c\n\034list_cloud_backup_operations"
    -          + "\030\030 \001(\0132;.google.spanner.executor.v1.List"
    -          + "CloudBackupOperationsActionH\000\022G\n\rget_ope"
    -          + "ration\030\031 \001(\0132..google.spanner.executor.v"
    -          + "1.GetOperationActionH\000\022M\n\020cancel_operati"
    -          + "on\030\032 \001(\01321.google.spanner.executor.v1.Ca"
    -          + "ncelOperationActionH\000\022c\n\034change_quorum_c"
    -          + "loud_database\030\034 \001(\0132;.google.spanner.exe"
    -          + "cutor.v1.ChangeQuorumCloudDatabaseAction"
    -          + "H\000B\010\n\006action\"\245\001\n\036CreateUserInstanceConfi"
    -          + "gAction\022\026\n\016user_config_id\030\001 \001(\t\022\022\n\nproje"
    -          + "ct_id\030\002 \001(\t\022\026\n\016base_config_id\030\003 \001(\t\022?\n\010r"
    -          + "eplicas\030\004 \003(\0132-.google.spanner.admin.ins"
    -          + "tance.v1.ReplicaInfo\"\377\001\n\036UpdateUserInsta"
    -          + "nceConfigAction\022\026\n\016user_config_id\030\001 \001(\t\022"
    -          + "\022\n\nproject_id\030\002 \001(\t\022\031\n\014display_name\030\003 \001("
    -          + "\tH\000\210\001\001\022V\n\006labels\030\004 \003(\0132F.google.spanner."
    -          + "executor.v1.UpdateUserInstanceConfigActi"
    -          + "on.LabelsEntry\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001"
    -          + "(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\017\n\r_display_name\"N"
    -          + "\n\034GetCloudInstanceConfigAction\022\032\n\022instan"
    -          + "ce_config_id\030\001 \001(\t\022\022\n\nproject_id\030\002 \001(\t\"L"
    -          + "\n\036DeleteUserInstanceConfigAction\022\026\n\016user"
    -          + "_config_id\030\001 \001(\t\022\022\n\nproject_id\030\002 \001(\t\"\202\001\n"
    -          + "\036ListCloudInstanceConfigsAction\022\022\n\nproje"
    -          + "ct_id\030\001 \001(\t\022\026\n\tpage_size\030\002 \001(\005H\000\210\001\001\022\027\n\np"
    -          + "age_token\030\003 \001(\tH\001\210\001\001B\014\n\n_page_sizeB\r\n\013_p"
    -          + "age_token\"\253\003\n\031CreateCloudInstanceAction\022"
    -          + "\023\n\013instance_id\030\001 \001(\t\022\022\n\nproject_id\030\002 \001(\t"
    -          + "\022\032\n\022instance_config_id\030\003 \001(\t\022\027\n\nnode_cou"
    -          + "nt\030\004 \001(\005H\000\210\001\001\022\035\n\020processing_units\030\006 \001(\005H"
    -          + "\001\210\001\001\022T\n\022autoscaling_config\030\007 \001(\01323.googl"
    -          + "e.spanner.admin.instance.v1.AutoscalingC"
    -          + "onfigH\002\210\001\001\022Q\n\006labels\030\005 \003(\0132A.google.span"
    -          + "ner.executor.v1.CreateCloudInstanceActio"
    -          + "n.LabelsEntry\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001("
    -          + "\t\022\r\n\005value\030\002 \001(\t:\0028\001B\r\n\013_node_countB\023\n\021_"
    -          + "processing_unitsB\025\n\023_autoscaling_config\""
    -          + "\273\003\n\031UpdateCloudInstanceAction\022\023\n\013instanc"
    -          + "e_id\030\001 \001(\t\022\022\n\nproject_id\030\002 \001(\t\022\031\n\014displa"
    -          + "y_name\030\003 \001(\tH\000\210\001\001\022\027\n\nnode_count\030\004 \001(\005H\001\210"
    -          + "\001\001\022\035\n\020processing_units\030\005 \001(\005H\002\210\001\001\022T\n\022aut"
    -          + "oscaling_config\030\007 \001(\01323.google.spanner.a"
    -          + "dmin.instance.v1.AutoscalingConfigH\003\210\001\001\022"
    -          + "Q\n\006labels\030\006 \003(\0132A.google.spanner.executo"
    -          + "r.v1.UpdateCloudInstanceAction.LabelsEnt"
    -          + "ry\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030"
    -          + "\002 \001(\t:\0028\001B\017\n\r_display_nameB\r\n\013_node_coun"
    -          + "tB\023\n\021_processing_unitsB\025\n\023_autoscaling_c"
    -          + "onfig\"D\n\031DeleteCloudInstanceAction\022\023\n\013in"
    -          + "stance_id\030\001 \001(\t\022\022\n\nproject_id\030\002 \001(\t\"\227\002\n\031"
    -          + "CreateCloudDatabaseAction\022\023\n\013instance_id"
    -          + "\030\001 \001(\t\022\022\n\nproject_id\030\002 \001(\t\022\023\n\013database_i"
    -          + "d\030\003 \001(\t\022\025\n\rsdl_statement\030\004 \003(\t\022M\n\021encryp"
    -          + "tion_config\030\005 \001(\01322.google.spanner.admin"
    -          + ".database.v1.EncryptionConfig\022\024\n\007dialect"
    -          + "\030\006 \001(\tH\000\210\001\001\022\036\n\021proto_descriptors\030\007 \001(\014H\001"
    -          + "\210\001\001B\n\n\010_dialectB\024\n\022_proto_descriptors\"\277\001"
    -          + "\n\034UpdateCloudDatabaseDdlAction\022\023\n\013instan"
    -          + "ce_id\030\001 \001(\t\022\022\n\nproject_id\030\002 \001(\t\022\023\n\013datab"
    -          + "ase_id\030\003 \001(\t\022\025\n\rsdl_statement\030\004 \003(\t\022\024\n\014o"
    -          + "peration_id\030\005 \001(\t\022\036\n\021proto_descriptors\030\006"
    -          + " \001(\014H\000\210\001\001B\024\n\022_proto_descriptors\"{\n\031Updat"
    -          + "eCloudDatabaseAction\022\023\n\013instance_id\030\001 \001("
    -          + "\t\022\022\n\nproject_id\030\002 \001(\t\022\025\n\rdatabase_name\030\003"
    -          + " \001(\t\022\036\n\026enable_drop_protection\030\004 \001(\010\"W\n\027"
    -          + "DropCloudDatabaseAction\022\023\n\013instance_id\030\001"
    -          + " \001(\t\022\022\n\nproject_id\030\002 \001(\t\022\023\n\013database_id\030"
    -          + "\003 \001(\t\"h\n\037ChangeQuorumCloudDatabaseAction"
    -          + "\022\031\n\014database_uri\030\001 \001(\tH\000\210\001\001\022\031\n\021serving_l"
    -          + "ocations\030\002 \003(\tB\017\n\r_database_uri\"j\n\030ListC"
    -          + "loudDatabasesAction\022\022\n\nproject_id\030\001 \001(\t\022"
    -          + "\023\n\013instance_id\030\002 \001(\t\022\021\n\tpage_size\030\003 \001(\005\022"
    -          + "\022\n\npage_token\030\004 \001(\t\"\234\001\n\030ListCloudInstanc"
    -          + "esAction\022\022\n\nproject_id\030\001 \001(\t\022\023\n\006filter\030\002"
    -          + " \001(\tH\000\210\001\001\022\026\n\tpage_size\030\003 \001(\005H\001\210\001\001\022\027\n\npag"
    -          + "e_token\030\004 \001(\tH\002\210\001\001B\t\n\007_filterB\014\n\n_page_s"
    -          + "izeB\r\n\013_page_token\"A\n\026GetCloudInstanceAc"
    -          + "tion\022\022\n\nproject_id\030\001 \001(\t\022\023\n\013instance_id\030"
    -          + "\002 \001(\t\"\203\001\n!ListCloudDatabaseOperationsAct"
    -          + "ion\022\022\n\nproject_id\030\001 \001(\t\022\023\n\013instance_id\030\002"
    -          + " \001(\t\022\016\n\006filter\030\003 \001(\t\022\021\n\tpage_size\030\004 \001(\005\022"
    -          + "\022\n\npage_token\030\005 \001(\t\"\341\001\n\032RestoreCloudData"
    -          + "baseAction\022\022\n\nproject_id\030\001 \001(\t\022\032\n\022backup"
    -          + "_instance_id\030\002 \001(\t\022\021\n\tbackup_id\030\003 \001(\t\022\034\n"
    -          + "\024database_instance_id\030\004 \001(\t\022\023\n\013database_"
    -          + "id\030\005 \001(\t\022M\n\021encryption_config\030\007 \001(\01322.go"
    -          + "ogle.spanner.admin.database.v1.Encryptio"
    -          + "nConfig\"V\n\026GetCloudDatabaseAction\022\022\n\npro"
    -          + "ject_id\030\001 \001(\t\022\023\n\013instance_id\030\002 \001(\t\022\023\n\013da"
    -          + "tabase_id\030\003 \001(\t\"\267\002\n\027CreateCloudBackupAct"
    -          + "ion\022\022\n\nproject_id\030\001 \001(\t\022\023\n\013instance_id\030\002"
    -          + " \001(\t\022\021\n\tbackup_id\030\003 \001(\t\022\023\n\013database_id\030\004"
    -          + " \001(\t\0224\n\013expire_time\030\005 \001(\0132\032.google.proto"
    -          + "buf.TimestampB\003\340A\003\0225\n\014version_time\030\006 \001(\013"
    -          + "2\032.google.protobuf.TimestampH\000\210\001\001\022M\n\021enc"
    -          + "ryption_config\030\007 \001(\01322.google.spanner.ad"
    -          + "min.database.v1.EncryptionConfigB\017\n\r_ver"
    -          + "sion_time\"\240\001\n\025CopyCloudBackupAction\022\022\n\np"
    -          + "roject_id\030\001 \001(\t\022\023\n\013instance_id\030\002 \001(\t\022\021\n\t"
    -          + "backup_id\030\003 \001(\t\022\025\n\rsource_backup\030\004 \001(\t\0224"
    -          + "\n\013expire_time\030\005 \001(\0132\032.google.protobuf.Ti"
    -          + "mestampB\003\340A\003\"R\n\024GetCloudBackupAction\022\022\n\n"
    -          + "project_id\030\001 \001(\t\022\023\n\013instance_id\030\002 \001(\t\022\021\n"
    -          + "\tbackup_id\030\003 \001(\t\"\213\001\n\027UpdateCloudBackupAc"
    -          + "tion\022\022\n\nproject_id\030\001 \001(\t\022\023\n\013instance_id\030"
    -          + "\002 \001(\t\022\021\n\tbackup_id\030\003 \001(\t\0224\n\013expire_time\030"
    -          + "\004 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\"U"
    -          + "\n\027DeleteCloudBackupAction\022\022\n\nproject_id\030"
    -          + "\001 \001(\t\022\023\n\013instance_id\030\002 \001(\t\022\021\n\tbackup_id\030"
    -          + "\003 \001(\t\"x\n\026ListCloudBackupsAction\022\022\n\nproje"
    -          + "ct_id\030\001 \001(\t\022\023\n\013instance_id\030\002 \001(\t\022\016\n\006filt"
    -          + "er\030\003 \001(\t\022\021\n\tpage_size\030\004 \001(\005\022\022\n\npage_toke"
    -          + "n\030\005 \001(\t\"\201\001\n\037ListCloudBackupOperationsAct"
    -          + "ion\022\022\n\nproject_id\030\001 \001(\t\022\023\n\013instance_id\030\002"
    -          + " \001(\t\022\016\n\006filter\030\003 \001(\t\022\021\n\tpage_size\030\004 \001(\005\022"
    -          + "\022\n\npage_token\030\005 \001(\t\"\'\n\022GetOperationActio"
    -          + "n\022\021\n\toperation\030\001 \001(\t\"*\n\025CancelOperationA"
    -          + "ction\022\021\n\toperation\030\001 \001(\t\"\210\001\n\033StartBatchT"
    -          + "ransactionAction\0224\n\016batch_txn_time\030\001 \001(\013"
    -          + "2\032.google.protobuf.TimestampH\000\022\r\n\003tid\030\002 "
    -          + "\001(\014H\000\022\033\n\023cloud_database_role\030\003 \001(\tB\007\n\005pa"
    -          + "ram\".\n\033CloseBatchTransactionAction\022\017\n\007cl"
    -          + "eanup\030\001 \001(\010\"\227\002\n!GenerateDbPartitionsForR"
    -          + "eadAction\0224\n\004read\030\001 \001(\0132&.google.spanner"
    -          + ".executor.v1.ReadAction\0228\n\005table\030\002 \003(\0132)"
    -          + ".google.spanner.executor.v1.TableMetadat"
    -          + "a\022(\n\033desired_bytes_per_partition\030\003 \001(\003H\000"
    -          + "\210\001\001\022 \n\023max_partition_count\030\004 \001(\003H\001\210\001\001B\036\n"
    -          + "\034_desired_bytes_per_partitionB\026\n\024_max_pa"
    -          + "rtition_count\"\246\001\n\"GenerateDbPartitionsFo"
    -          + "rQueryAction\0226\n\005query\030\001 \001(\0132\'.google.spa"
    -          + "nner.executor.v1.QueryAction\022(\n\033desired_"
    -          + "bytes_per_partition\030\002 \001(\003H\000\210\001\001B\036\n\034_desir"
    -          + "ed_bytes_per_partition\"x\n\016BatchPartition"
    -          + "\022\021\n\tpartition\030\001 \001(\014\022\027\n\017partition_token\030\002"
    -          + " \001(\014\022\022\n\005table\030\003 \001(\tH\000\210\001\001\022\022\n\005index\030\004 \001(\tH"
    -          + "\001\210\001\001B\010\n\006_tableB\010\n\006_index\"W\n\026ExecuteParti"
    -          + "tionAction\022=\n\tpartition\030\001 \001(\0132*.google.s"
    -          + "panner.executor.v1.BatchPartition\"\216\003\n\030Ex"
    -          + "ecuteChangeStreamQuery\022\014\n\004name\030\001 \001(\t\022.\n\n"
    -          + "start_time\030\002 \001(\0132\032.google.protobuf.Times"
    -          + "tamp\0221\n\010end_time\030\003 \001(\0132\032.google.protobuf"
    -          + ".TimestampH\000\210\001\001\022\034\n\017partition_token\030\004 \001(\t"
    -          + "H\001\210\001\001\022\024\n\014read_options\030\005 \003(\t\022#\n\026heartbeat"
    -          + "_milliseconds\030\006 \001(\005H\002\210\001\001\022\035\n\020deadline_sec"
    -          + "onds\030\007 \001(\003H\003\210\001\001\022 \n\023cloud_database_role\030\010"
    -          + " \001(\tH\004\210\001\001B\013\n\t_end_timeB\022\n\020_partition_tok"
    -          + "enB\031\n\027_heartbeat_millisecondsB\023\n\021_deadli"
    -          + "ne_secondsB\026\n\024_cloud_database_role\"\242\005\n\024S"
    -          + "pannerActionOutcome\022\'\n\006status\030\001 \001(\0132\022.go"
    -          + "ogle.rpc.StatusH\000\210\001\001\0224\n\013commit_time\030\002 \001("
    -          + "\0132\032.google.protobuf.TimestampH\001\210\001\001\022@\n\013re"
    -          + "ad_result\030\003 \001(\0132&.google.spanner.executo"
    -          + "r.v1.ReadResultH\002\210\001\001\022B\n\014query_result\030\004 \001"
    -          + "(\0132\'.google.spanner.executor.v1.QueryRes"
    -          + "ultH\003\210\001\001\022\"\n\025transaction_restarted\030\005 \001(\010H"
    -          + "\004\210\001\001\022\031\n\014batch_txn_id\030\006 \001(\014H\005\210\001\001\022@\n\014db_pa"
    -          + "rtition\030\007 \003(\0132*.google.spanner.executor."
    -          + "v1.BatchPartition\022B\n\014admin_result\030\010 \001(\0132"
    -          + "\'.google.spanner.executor.v1.AdminResult"
    -          + "H\006\210\001\001\022\031\n\021dml_rows_modified\030\t \003(\003\022M\n\025chan"
    -          + "ge_stream_records\030\n \003(\0132..google.spanner"
    -          + ".executor.v1.ChangeStreamRecordB\t\n\007_stat"
    -          + "usB\016\n\014_commit_timeB\016\n\014_read_resultB\017\n\r_q"
    -          + "uery_resultB\030\n\026_transaction_restartedB\017\n"
    -          + "\r_batch_txn_idB\017\n\r_admin_result\"\231\003\n\013Admi"
    -          + "nResult\022H\n\017backup_response\030\001 \001(\0132/.googl"
    -          + "e.spanner.executor.v1.CloudBackupRespons"
    -          + "e\022I\n\022operation_response\030\002 \001(\0132-.google.s"
    -          + "panner.executor.v1.OperationResponse\022L\n\021"
    -          + "database_response\030\003 \001(\01321.google.spanner"
    -          + ".executor.v1.CloudDatabaseResponse\022L\n\021in"
    -          + "stance_response\030\004 \001(\01321.google.spanner.e"
    -          + "xecutor.v1.CloudInstanceResponse\022Y\n\030inst"
    -          + "ance_config_response\030\005 \001(\01327.google.span"
    -          + "ner.executor.v1.CloudInstanceConfigRespo"
    -          + "nse\"\353\001\n\023CloudBackupResponse\022@\n\016listed_ba"
    -          + "ckups\030\001 \003(\0132(.google.spanner.admin.datab"
    -          + "ase.v1.Backup\022?\n\030listed_backup_operation"
    -          + "s\030\002 \003(\0132\035.google.longrunning.Operation\022\027"
    -          + "\n\017next_page_token\030\003 \001(\t\0228\n\006backup\030\004 \001(\0132"
    -          + "(.google.spanner.admin.database.v1.Backu"
    -          + "p\"\230\001\n\021OperationResponse\0228\n\021listed_operat"
    -          + "ions\030\001 \003(\0132\035.google.longrunning.Operatio"
    -          + "n\022\027\n\017next_page_token\030\002 \001(\t\0220\n\toperation\030"
    -          + "\003 \001(\0132\035.google.longrunning.Operation\"\264\001\n"
    -          + "\025CloudInstanceResponse\022D\n\020listed_instanc"
    -          + "es\030\001 \003(\0132*.google.spanner.admin.instance"
    -          + ".v1.Instance\022\027\n\017next_page_token\030\002 \001(\t\022<\n"
    -          + "\010instance\030\003 \001(\0132*.google.spanner.admin.i"
    -          + "nstance.v1.Instance\"\324\001\n\033CloudInstanceCon"
    -          + "figResponse\022Q\n\027listed_instance_configs\030\001"
    -          + " \003(\01320.google.spanner.admin.instance.v1."
    -          + "InstanceConfig\022\027\n\017next_page_token\030\002 \001(\t\022"
    -          + "I\n\017instance_config\030\003 \001(\01320.google.spanne"
    -          + "r.admin.instance.v1.InstanceConfig\"\367\001\n\025C"
    -          + "loudDatabaseResponse\022D\n\020listed_databases"
    -          + "\030\001 \003(\0132*.google.spanner.admin.database.v"
    -          + "1.Database\022A\n\032listed_database_operations"
    -          + "\030\002 \003(\0132\035.google.longrunning.Operation\022\027\n"
    -          + "\017next_page_token\030\003 \001(\t\022<\n\010database\030\004 \001(\013"
    -          + "2*.google.spanner.admin.database.v1.Data",
    -      "base\"\336\001\n\nReadResult\022\r\n\005table\030\001 \001(\t\022\022\n\005in"
    -          + "dex\030\002 \001(\tH\000\210\001\001\022\032\n\rrequest_index\030\003 \001(\005H\001\210"
    -          + "\001\001\0222\n\003row\030\004 \003(\0132%.google.spanner.executo"
    -          + "r.v1.ValueList\0224\n\010row_type\030\005 \001(\0132\035.googl"
    -          + "e.spanner.v1.StructTypeH\002\210\001\001B\010\n\006_indexB\020"
    -          + "\n\016_request_indexB\013\n\t_row_type\"\204\001\n\013QueryR"
    -          + "esult\0222\n\003row\030\001 \003(\0132%.google.spanner.exec"
    -          + "utor.v1.ValueList\0224\n\010row_type\030\002 \001(\0132\035.go"
    -          + "ogle.spanner.v1.StructTypeH\000\210\001\001B\013\n\t_row_"
    -          + "type\"\363\001\n\022ChangeStreamRecord\022C\n\013data_chan"
    -          + "ge\030\001 \001(\0132,.google.spanner.executor.v1.Da"
    -          + "taChangeRecordH\000\022L\n\017child_partition\030\002 \001("
    -          + "\01321.google.spanner.executor.v1.ChildPart"
    -          + "itionsRecordH\000\022@\n\theartbeat\030\003 \001(\0132+.goog"
    -          + "le.spanner.executor.v1.HeartbeatRecordH\000"
    -          + "B\010\n\006record\"\330\004\n\020DataChangeRecord\022/\n\013commi"
    -          + "t_time\030\001 \001(\0132\032.google.protobuf.Timestamp"
    -          + "\022\027\n\017record_sequence\030\002 \001(\t\022\026\n\016transaction"
    -          + "_id\030\003 \001(\t\022\026\n\016is_last_record\030\004 \001(\010\022\r\n\005tab"
    -          + "le\030\005 \001(\t\022M\n\014column_types\030\006 \003(\01327.google."
    -          + "spanner.executor.v1.DataChangeRecord.Col"
    -          + "umnType\022>\n\004mods\030\007 \003(\01320.google.spanner.e"
    -          + "xecutor.v1.DataChangeRecord.Mod\022\020\n\010mod_t"
    -          + "ype\030\010 \001(\t\022\032\n\022value_capture_type\030\t \001(\t\022\024\n"
    -          + "\014record_count\030\n \001(\003\022\027\n\017partition_count\030\013"
    -          + " \001(\003\022\027\n\017transaction_tag\030\014 \001(\t\022\035\n\025is_syst"
    -          + "em_transaction\030\r \001(\010\032Z\n\nColumnType\022\014\n\004na"
    -          + "me\030\001 \001(\t\022\014\n\004type\030\002 \001(\t\022\026\n\016is_primary_key"
    -          + "\030\003 \001(\010\022\030\n\020ordinal_position\030\004 \001(\003\032;\n\003Mod\022"
    -          + "\014\n\004keys\030\001 \001(\t\022\022\n\nnew_values\030\002 \001(\t\022\022\n\nold"
    -          + "_values\030\003 \001(\t\"\376\001\n\025ChildPartitionsRecord\022"
    -          + ".\n\nstart_time\030\001 \001(\0132\032.google.protobuf.Ti"
    -          + "mestamp\022\027\n\017record_sequence\030\002 \001(\t\022Z\n\020chil"
    -          + "d_partitions\030\003 \003(\0132@.google.spanner.exec"
    -          + "utor.v1.ChildPartitionsRecord.ChildParti"
    -          + "tion\032@\n\016ChildPartition\022\r\n\005token\030\001 \001(\t\022\037\n"
    -          + "\027parent_partition_tokens\030\002 \003(\t\"E\n\017Heartb"
    -          + "eatRecord\0222\n\016heartbeat_time\030\001 \001(\0132\032.goog"
    -          + "le.protobuf.Timestamp\"^\n\016SpannerOptions\022"
    -          + "L\n\024session_pool_options\030\001 \001(\0132..google.s"
    -          + "panner.executor.v1.SessionPoolOptions\"-\n"
    -          + "\022SessionPoolOptions\022\027\n\017use_multiplexed\030\001"
    -          + " \001(\0102\314\001\n\024SpannerExecutorProxy\022\211\001\n\022Execut"
    -          + "eActionAsync\0225.google.spanner.executor.v"
    -          + "1.SpannerAsyncActionRequest\0326.google.spa"
    -          + "nner.executor.v1.SpannerAsyncActionRespo"
    -          + "nse\"\000(\0010\001\032(\312A%spanner-cloud-executor.goo"
    -          + "gleapis.comBx\n\036com.google.spanner.execut"
    -          + "or.v1B\022CloudExecutorProtoP\001Z@cloud.googl"
    -          + "e.com/go/spanner/executor/apiv1/executor"
    -          + "pb;executorpbb\006proto3"
    +          + ".executor.v1.ExecuteChangeStreamQueryH\000\022"
    +          + "Q\n\022query_cancellation\0303 \001(\01323.google.spa"
    +          + "nner.executor.v1.QueryCancellationAction"
    +          + "H\000B\010\n\006action\"\212\001\n\nReadAction\022\r\n\005table\030\001 \001"
    +          + "(\t\022\022\n\005index\030\002 \001(\tH\000\210\001\001\022\016\n\006column\030\003 \003(\t\0220"
    +          + "\n\004keys\030\004 \001(\0132\".google.spanner.executor.v"
    +          + "1.KeySet\022\r\n\005limit\030\005 \001(\005B\010\n\006_index\"\321\001\n\013Qu"
    +          + "eryAction\022\013\n\003sql\030\001 \001(\t\022A\n\006params\030\002 \003(\01321"
    +          + ".google.spanner.executor.v1.QueryAction."
    +          + "Parameter\032r\n\tParameter\022\014\n\004name\030\001 \001(\t\022%\n\004"
    +          + "type\030\002 \001(\0132\027.google.spanner.v1.Type\0220\n\005v"
    +          + "alue\030\003 \001(\0132!.google.spanner.executor.v1."
    +          + "Value\"\206\001\n\tDmlAction\0227\n\006update\030\001 \001(\0132\'.go"
    +          + "ogle.spanner.executor.v1.QueryAction\022$\n\027"
    +          + "autocommit_if_supported\030\002 \001(\010H\000\210\001\001B\032\n\030_a"
    +          + "utocommit_if_supported\"J\n\016BatchDmlAction"
    +          + "\0228\n\007updates\030\001 \003(\0132\'.google.spanner.execu"
    +          + "tor.v1.QueryAction\"\311\003\n\005Value\022\021\n\007is_null\030"
    +          + "\001 \001(\010H\000\022\023\n\tint_value\030\002 \001(\003H\000\022\024\n\nbool_val"
    +          + "ue\030\003 \001(\010H\000\022\026\n\014double_value\030\004 \001(\001H\000\022\025\n\013by"
    +          + "tes_value\030\005 \001(\014H\000\022\026\n\014string_value\030\006 \001(\tH"
    +          + "\000\022=\n\014struct_value\030\007 \001(\0132%.google.spanner"
    +          + ".executor.v1.ValueListH\000\0225\n\017timestamp_va"
    +          + "lue\030\010 \001(\0132\032.google.protobuf.TimestampH\000\022"
    +          + "\031\n\017date_days_value\030\t \001(\005H\000\022\035\n\023is_commit_"
    +          + "timestamp\030\n \001(\010H\000\022<\n\013array_value\030\013 \001(\0132%"
    +          + ".google.spanner.executor.v1.ValueListH\000\022"
    +          + "0\n\narray_type\030\014 \001(\0132\027.google.spanner.v1."
    +          + "TypeH\001\210\001\001B\014\n\nvalue_typeB\r\n\013_array_type\"\237"
    +          + "\002\n\010KeyRange\0224\n\005start\030\001 \001(\0132%.google.span"
    +          + "ner.executor.v1.ValueList\0224\n\005limit\030\002 \001(\013"
    +          + "2%.google.spanner.executor.v1.ValueList\022"
    +          + "<\n\004type\030\003 \001(\0162).google.spanner.executor."
    +          + "v1.KeyRange.TypeH\000\210\001\001\"`\n\004Type\022\024\n\020TYPE_UN"
    +          + "SPECIFIED\020\000\022\021\n\rCLOSED_CLOSED\020\001\022\017\n\013CLOSED"
    +          + "_OPEN\020\002\022\017\n\013OPEN_CLOSED\020\003\022\r\n\tOPEN_OPEN\020\004B"
    +          + "\007\n\005_type\"\200\001\n\006KeySet\0224\n\005point\030\001 \003(\0132%.goo"
    +          + "gle.spanner.executor.v1.ValueList\0223\n\005ran"
    +          + "ge\030\002 \003(\0132$.google.spanner.executor.v1.Ke"
    +          + "yRange\022\013\n\003all\030\003 \001(\010\"=\n\tValueList\0220\n\005valu"
    +          + "e\030\001 \003(\0132!.google.spanner.executor.v1.Val"
    +          + "ue\"\274\005\n\016MutationAction\022;\n\003mod\030\001 \003(\0132..goo"
    +          + "gle.spanner.executor.v1.MutationAction.M"
    +          + "od\032z\n\nInsertArgs\022\016\n\006column\030\001 \003(\t\022%\n\004type"
    +          + "\030\002 \003(\0132\027.google.spanner.v1.Type\0225\n\006value"
    +          + "s\030\003 \003(\0132%.google.spanner.executor.v1.Val"
    +          + "ueList\032z\n\nUpdateArgs\022\016\n\006column\030\001 \003(\t\022%\n\004"
    +          + "type\030\002 \003(\0132\027.google.spanner.v1.Type\0225\n\006v"
    +          + "alues\030\003 \003(\0132%.google.spanner.executor.v1"
    +          + ".ValueList\032\364\002\n\003Mod\022\r\n\005table\030\001 \001(\t\022E\n\006ins"
    +          + "ert\030\002 \001(\01325.google.spanner.executor.v1.M"
    +          + "utationAction.InsertArgs\022E\n\006update\030\003 \001(\013"
    +          + "25.google.spanner.executor.v1.MutationAc"
    +          + "tion.UpdateArgs\022O\n\020insert_or_update\030\004 \001("
    +          + "\01325.google.spanner.executor.v1.MutationA"
    +          + "ction.InsertArgs\022F\n\007replace\030\005 \001(\01325.goog"
    +          + "le.spanner.executor.v1.MutationAction.In"
    +          + "sertArgs\0227\n\013delete_keys\030\006 \001(\0132\".google.s"
    +          + "panner.executor.v1.KeySet\"T\n\024WriteMutati"
    +          + "onsAction\022<\n\010mutation\030\001 \001(\0132*.google.spa"
    +          + "nner.executor.v1.MutationAction\"\337\002\n\027Part"
    +          + "itionedUpdateAction\022i\n\007options\030\001 \001(\0132S.g"
    +          + "oogle.spanner.executor.v1.PartitionedUpd"
    +          + "ateAction.ExecutePartitionedUpdateOption"
    +          + "sH\000\210\001\001\0227\n\006update\030\002 \001(\0132\'.google.spanner."
    +          + "executor.v1.QueryAction\032\223\001\n\037ExecuteParti"
    +          + "tionedUpdateOptions\022E\n\014rpc_priority\030\001 \001("
    +          + "\0162*.google.spanner.v1.RequestOptions.Pri"
    +          + "orityH\000\210\001\001\022\020\n\003tag\030\002 \001(\tH\001\210\001\001B\017\n\r_rpc_pri"
    +          + "orityB\006\n\004_tagB\n\n\010_options\"\256\002\n\026StartTrans"
    +          + "actionAction\022A\n\013concurrency\030\001 \001(\0132\'.goog"
    +          + "le.spanner.executor.v1.ConcurrencyH\000\210\001\001\022"
    +          + "8\n\005table\030\002 \003(\0132).google.spanner.executor"
    +          + ".v1.TableMetadata\022\030\n\020transaction_seed\030\003 "
    +          + "\001(\t\022W\n\021execution_options\030\004 \001(\01327.google."
    +          + "spanner.executor.v1.TransactionExecution"
    +          + "OptionsH\001\210\001\001B\016\n\014_concurrencyB\024\n\022_executi"
    +          + "on_options\"\256\002\n\013Concurrency\022\033\n\021staleness_"
    +          + "seconds\030\001 \001(\001H\000\022#\n\031min_read_timestamp_mi"
    +          + "cros\030\002 \001(\003H\000\022\037\n\025max_staleness_seconds\030\003 "
    +          + "\001(\001H\000\022 \n\026exact_timestamp_micros\030\004 \001(\003H\000\022"
    +          + "\020\n\006strong\030\005 \001(\010H\000\022\017\n\005batch\030\006 \001(\010H\000\022\033\n\023sn"
    +          + "apshot_epoch_read\030\007 \001(\010\022!\n\031snapshot_epoc"
    +          + "h_root_table\030\010 \001(\t\022#\n\033batch_read_timesta"
    +          + "mp_micros\030\t \001(\003B\022\n\020concurrency_mode\"\231\001\n\r"
    +          + "TableMetadata\022\014\n\004name\030\001 \001(\t\022:\n\006column\030\002 "
    +          + "\003(\0132*.google.spanner.executor.v1.ColumnM"
    +          + "etadata\022>\n\nkey_column\030\003 \003(\0132*.google.spa"
    +          + "nner.executor.v1.ColumnMetadata\"E\n\016Colum"
    +          + "nMetadata\022\014\n\004name\030\001 \001(\t\022%\n\004type\030\002 \001(\0132\027."
    +          + "google.spanner.v1.Type\"1\n\033TransactionExe"
    +          + "cutionOptions\022\022\n\noptimistic\030\001 \001(\010\"\230\001\n\027Fi"
    +          + "nishTransactionAction\022F\n\004mode\030\001 \001(\01628.go"
    +          + "ogle.spanner.executor.v1.FinishTransacti"
    +          + "onAction.Mode\"5\n\004Mode\022\024\n\020MODE_UNSPECIFIE"
    +          + "D\020\000\022\n\n\006COMMIT\020\001\022\013\n\007ABANDON\020\002\"\310\023\n\013AdminAc"
    +          + "tion\022a\n\033create_user_instance_config\030\001 \001("
    +          + "\0132:.google.spanner.executor.v1.CreateUse"
    +          + "rInstanceConfigActionH\000\022a\n\033update_user_i"
    +          + "nstance_config\030\002 \001(\0132:.google.spanner.ex"
    +          + "ecutor.v1.UpdateUserInstanceConfigAction"
    +          + "H\000\022a\n\033delete_user_instance_config\030\003 \001(\0132"
    +          + ":.google.spanner.executor.v1.DeleteUserI"
    +          + "nstanceConfigActionH\000\022]\n\031get_cloud_insta"
    +          + "nce_config\030\004 \001(\01328.google.spanner.execut"
    +          + "or.v1.GetCloudInstanceConfigActionH\000\022[\n\025"
    +          + "list_instance_configs\030\005 \001(\0132:.google.spa"
    +          + "nner.executor.v1.ListCloudInstanceConfig"
    +          + "sActionH\000\022V\n\025create_cloud_instance\030\006 \001(\013"
    +          + "25.google.spanner.executor.v1.CreateClou"
    +          + "dInstanceActionH\000\022V\n\025update_cloud_instan"
    +          + "ce\030\007 \001(\01325.google.spanner.executor.v1.Up"
    +          + "dateCloudInstanceActionH\000\022V\n\025delete_clou"
    +          + "d_instance\030\010 \001(\01325.google.spanner.execut"
    +          + "or.v1.DeleteCloudInstanceActionH\000\022T\n\024lis"
    +          + "t_cloud_instances\030\t \001(\01324.google.spanner"
    +          + ".executor.v1.ListCloudInstancesActionH\000\022"
    +          + "P\n\022get_cloud_instance\030\n \001(\01322.google.spa"
    +          + "nner.executor.v1.GetCloudInstanceActionH"
    +          + "\000\022V\n\025create_cloud_database\030\013 \001(\01325.googl"
    +          + "e.spanner.executor.v1.CreateCloudDatabas"
    +          + "eActionH\000\022]\n\031update_cloud_database_ddl\030\014"
    +          + " \001(\01328.google.spanner.executor.v1.Update"
    +          + "CloudDatabaseDdlActionH\000\022V\n\025update_cloud"
    +          + "_database\030\033 \001(\01325.google.spanner.executo"
    +          + "r.v1.UpdateCloudDatabaseActionH\000\022R\n\023drop"
    +          + "_cloud_database\030\r \001(\01323.google.spanner.e"
    +          + "xecutor.v1.DropCloudDatabaseActionH\000\022T\n\024"
    +          + "list_cloud_databases\030\016 \001(\01324.google.span"
    +          + "ner.executor.v1.ListCloudDatabasesAction"
    +          + "H\000\022g\n\036list_cloud_database_operations\030\017 \001"
    +          + "(\0132=.google.spanner.executor.v1.ListClou"
    +          + "dDatabaseOperationsActionH\000\022X\n\026restore_c"
    +          + "loud_database\030\020 \001(\01326.google.spanner.exe"
    +          + "cutor.v1.RestoreCloudDatabaseActionH\000\022P\n"
    +          + "\022get_cloud_database\030\021 \001(\01322.google.spann"
    +          + "er.executor.v1.GetCloudDatabaseActionH\000\022"
    +          + "R\n\023create_cloud_backup\030\022 \001(\01323.google.sp"
    +          + "anner.executor.v1.CreateCloudBackupActio"
    +          + "nH\000\022N\n\021copy_cloud_backup\030\023 \001(\01321.google."
    +          + "spanner.executor.v1.CopyCloudBackupActio"
    +          + "nH\000\022L\n\020get_cloud_backup\030\024 \001(\01320.google.s"
    +          + "panner.executor.v1.GetCloudBackupActionH"
    +          + "\000\022R\n\023update_cloud_backup\030\025 \001(\01323.google."
    +          + "spanner.executor.v1.UpdateCloudBackupAct"
    +          + "ionH\000\022R\n\023delete_cloud_backup\030\026 \001(\01323.goo"
    +          + "gle.spanner.executor.v1.DeleteCloudBacku"
    +          + "pActionH\000\022P\n\022list_cloud_backups\030\027 \001(\01322."
    +          + "google.spanner.executor.v1.ListCloudBack"
    +          + "upsActionH\000\022c\n\034list_cloud_backup_operati"
    +          + "ons\030\030 \001(\0132;.google.spanner.executor.v1.L"
    +          + "istCloudBackupOperationsActionH\000\022G\n\rget_"
    +          + "operation\030\031 \001(\0132..google.spanner.executo"
    +          + "r.v1.GetOperationActionH\000\022M\n\020cancel_oper"
    +          + "ation\030\032 \001(\01321.google.spanner.executor.v1"
    +          + ".CancelOperationActionH\000\022c\n\034change_quoru"
    +          + "m_cloud_database\030\034 \001(\0132;.google.spanner."
    +          + "executor.v1.ChangeQuorumCloudDatabaseAct"
    +          + "ionH\000B\010\n\006action\"\245\001\n\036CreateUserInstanceCo"
    +          + "nfigAction\022\026\n\016user_config_id\030\001 \001(\t\022\022\n\npr"
    +          + "oject_id\030\002 \001(\t\022\026\n\016base_config_id\030\003 \001(\t\022?"
    +          + "\n\010replicas\030\004 \003(\0132-.google.spanner.admin."
    +          + "instance.v1.ReplicaInfo\"\377\001\n\036UpdateUserIn"
    +          + "stanceConfigAction\022\026\n\016user_config_id\030\001 \001"
    +          + "(\t\022\022\n\nproject_id\030\002 \001(\t\022\031\n\014display_name\030\003"
    +          + " \001(\tH\000\210\001\001\022V\n\006labels\030\004 \003(\0132F.google.spann"
    +          + "er.executor.v1.UpdateUserInstanceConfigA"
    +          + "ction.LabelsEntry\032-\n\013LabelsEntry\022\013\n\003key\030"
    +          + "\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\017\n\r_display_nam"
    +          + "e\"N\n\034GetCloudInstanceConfigAction\022\032\n\022ins"
    +          + "tance_config_id\030\001 \001(\t\022\022\n\nproject_id\030\002 \001("
    +          + "\t\"L\n\036DeleteUserInstanceConfigAction\022\026\n\016u"
    +          + "ser_config_id\030\001 \001(\t\022\022\n\nproject_id\030\002 \001(\t\""
    +          + "\202\001\n\036ListCloudInstanceConfigsAction\022\022\n\npr"
    +          + "oject_id\030\001 \001(\t\022\026\n\tpage_size\030\002 \001(\005H\000\210\001\001\022\027"
    +          + "\n\npage_token\030\003 \001(\tH\001\210\001\001B\014\n\n_page_sizeB\r\n"
    +          + "\013_page_token\"\253\003\n\031CreateCloudInstanceActi"
    +          + "on\022\023\n\013instance_id\030\001 \001(\t\022\022\n\nproject_id\030\002 "
    +          + "\001(\t\022\032\n\022instance_config_id\030\003 \001(\t\022\027\n\nnode_"
    +          + "count\030\004 \001(\005H\000\210\001\001\022\035\n\020processing_units\030\006 \001"
    +          + "(\005H\001\210\001\001\022T\n\022autoscaling_config\030\007 \001(\01323.go"
    +          + "ogle.spanner.admin.instance.v1.Autoscali"
    +          + "ngConfigH\002\210\001\001\022Q\n\006labels\030\005 \003(\0132A.google.s"
    +          + "panner.executor.v1.CreateCloudInstanceAc"
    +          + "tion.LabelsEntry\032-\n\013LabelsEntry\022\013\n\003key\030\001"
    +          + " \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\r\n\013_node_countB\023"
    +          + "\n\021_processing_unitsB\025\n\023_autoscaling_conf"
    +          + "ig\"\273\003\n\031UpdateCloudInstanceAction\022\023\n\013inst"
    +          + "ance_id\030\001 \001(\t\022\022\n\nproject_id\030\002 \001(\t\022\031\n\014dis"
    +          + "play_name\030\003 \001(\tH\000\210\001\001\022\027\n\nnode_count\030\004 \001(\005"
    +          + "H\001\210\001\001\022\035\n\020processing_units\030\005 \001(\005H\002\210\001\001\022T\n\022"
    +          + "autoscaling_config\030\007 \001(\01323.google.spanne"
    +          + "r.admin.instance.v1.AutoscalingConfigH\003\210"
    +          + "\001\001\022Q\n\006labels\030\006 \003(\0132A.google.spanner.exec"
    +          + "utor.v1.UpdateCloudInstanceAction.Labels"
    +          + "Entry\032-\n\013LabelsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005val"
    +          + "ue\030\002 \001(\t:\0028\001B\017\n\r_display_nameB\r\n\013_node_c"
    +          + "ountB\023\n\021_processing_unitsB\025\n\023_autoscalin"
    +          + "g_config\"D\n\031DeleteCloudInstanceAction\022\023\n"
    +          + "\013instance_id\030\001 \001(\t\022\022\n\nproject_id\030\002 \001(\t\"\227"
    +          + "\002\n\031CreateCloudDatabaseAction\022\023\n\013instance"
    +          + "_id\030\001 \001(\t\022\022\n\nproject_id\030\002 \001(\t\022\023\n\013databas"
    +          + "e_id\030\003 \001(\t\022\025\n\rsdl_statement\030\004 \003(\t\022M\n\021enc"
    +          + "ryption_config\030\005 \001(\01322.google.spanner.ad"
    +          + "min.database.v1.EncryptionConfig\022\024\n\007dial"
    +          + "ect\030\006 \001(\tH\000\210\001\001\022\036\n\021proto_descriptors\030\007 \001("
    +          + "\014H\001\210\001\001B\n\n\010_dialectB\024\n\022_proto_descriptors"
    +          + "\"\277\001\n\034UpdateCloudDatabaseDdlAction\022\023\n\013ins"
    +          + "tance_id\030\001 \001(\t\022\022\n\nproject_id\030\002 \001(\t\022\023\n\013da"
    +          + "tabase_id\030\003 \001(\t\022\025\n\rsdl_statement\030\004 \003(\t\022\024"
    +          + "\n\014operation_id\030\005 \001(\t\022\036\n\021proto_descriptor"
    +          + "s\030\006 \001(\014H\000\210\001\001B\024\n\022_proto_descriptors\"{\n\031Up"
    +          + "dateCloudDatabaseAction\022\023\n\013instance_id\030\001"
    +          + " \001(\t\022\022\n\nproject_id\030\002 \001(\t\022\025\n\rdatabase_nam"
    +          + "e\030\003 \001(\t\022\036\n\026enable_drop_protection\030\004 \001(\010\""
    +          + "W\n\027DropCloudDatabaseAction\022\023\n\013instance_i"
    +          + "d\030\001 \001(\t\022\022\n\nproject_id\030\002 \001(\t\022\023\n\013database_"
    +          + "id\030\003 \001(\t\"h\n\037ChangeQuorumCloudDatabaseAct"
    +          + "ion\022\031\n\014database_uri\030\001 \001(\tH\000\210\001\001\022\031\n\021servin"
    +          + "g_locations\030\002 \003(\tB\017\n\r_database_uri\"j\n\030Li"
    +          + "stCloudDatabasesAction\022\022\n\nproject_id\030\001 \001"
    +          + "(\t\022\023\n\013instance_id\030\002 \001(\t\022\021\n\tpage_size\030\003 \001"
    +          + "(\005\022\022\n\npage_token\030\004 \001(\t\"\234\001\n\030ListCloudInst"
    +          + "ancesAction\022\022\n\nproject_id\030\001 \001(\t\022\023\n\006filte"
    +          + "r\030\002 \001(\tH\000\210\001\001\022\026\n\tpage_size\030\003 \001(\005H\001\210\001\001\022\027\n\n"
    +          + "page_token\030\004 \001(\tH\002\210\001\001B\t\n\007_filterB\014\n\n_pag"
    +          + "e_sizeB\r\n\013_page_token\"A\n\026GetCloudInstanc"
    +          + "eAction\022\022\n\nproject_id\030\001 \001(\t\022\023\n\013instance_"
    +          + "id\030\002 \001(\t\"\203\001\n!ListCloudDatabaseOperations"
    +          + "Action\022\022\n\nproject_id\030\001 \001(\t\022\023\n\013instance_i"
    +          + "d\030\002 \001(\t\022\016\n\006filter\030\003 \001(\t\022\021\n\tpage_size\030\004 \001"
    +          + "(\005\022\022\n\npage_token\030\005 \001(\t\"\341\001\n\032RestoreCloudD"
    +          + "atabaseAction\022\022\n\nproject_id\030\001 \001(\t\022\032\n\022bac"
    +          + "kup_instance_id\030\002 \001(\t\022\021\n\tbackup_id\030\003 \001(\t"
    +          + "\022\034\n\024database_instance_id\030\004 \001(\t\022\023\n\013databa"
    +          + "se_id\030\005 \001(\t\022M\n\021encryption_config\030\007 \001(\01322"
    +          + ".google.spanner.admin.database.v1.Encryp"
    +          + "tionConfig\"V\n\026GetCloudDatabaseAction\022\022\n\n"
    +          + "project_id\030\001 \001(\t\022\023\n\013instance_id\030\002 \001(\t\022\023\n"
    +          + "\013database_id\030\003 \001(\t\"\267\002\n\027CreateCloudBackup"
    +          + "Action\022\022\n\nproject_id\030\001 \001(\t\022\023\n\013instance_i"
    +          + "d\030\002 \001(\t\022\021\n\tbackup_id\030\003 \001(\t\022\023\n\013database_i"
    +          + "d\030\004 \001(\t\0224\n\013expire_time\030\005 \001(\0132\032.google.pr"
    +          + "otobuf.TimestampB\003\340A\003\0225\n\014version_time\030\006 "
    +          + "\001(\0132\032.google.protobuf.TimestampH\000\210\001\001\022M\n\021"
    +          + "encryption_config\030\007 \001(\01322.google.spanner"
    +          + ".admin.database.v1.EncryptionConfigB\017\n\r_"
    +          + "version_time\"\240\001\n\025CopyCloudBackupAction\022\022"
    +          + "\n\nproject_id\030\001 \001(\t\022\023\n\013instance_id\030\002 \001(\t\022"
    +          + "\021\n\tbackup_id\030\003 \001(\t\022\025\n\rsource_backup\030\004 \001("
    +          + "\t\0224\n\013expire_time\030\005 \001(\0132\032.google.protobuf"
    +          + ".TimestampB\003\340A\003\"R\n\024GetCloudBackupAction\022"
    +          + "\022\n\nproject_id\030\001 \001(\t\022\023\n\013instance_id\030\002 \001(\t"
    +          + "\022\021\n\tbackup_id\030\003 \001(\t\"\213\001\n\027UpdateCloudBacku"
    +          + "pAction\022\022\n\nproject_id\030\001 \001(\t\022\023\n\013instance_"
    +          + "id\030\002 \001(\t\022\021\n\tbackup_id\030\003 \001(\t\0224\n\013expire_ti"
    +          + "me\030\004 \001(\0132\032.google.protobuf.TimestampB\003\340A"
    +          + "\003\"U\n\027DeleteCloudBackupAction\022\022\n\nproject_"
    +          + "id\030\001 \001(\t\022\023\n\013instance_id\030\002 \001(\t\022\021\n\tbackup_"
    +          + "id\030\003 \001(\t\"x\n\026ListCloudBackupsAction\022\022\n\npr"
    +          + "oject_id\030\001 \001(\t\022\023\n\013instance_id\030\002 \001(\t\022\016\n\006f"
    +          + "ilter\030\003 \001(\t\022\021\n\tpage_size\030\004 \001(\005\022\022\n\npage_t"
    +          + "oken\030\005 \001(\t\"\201\001\n\037ListCloudBackupOperations"
    +          + "Action\022\022\n\nproject_id\030\001 \001(\t\022\023\n\013instance_i"
    +          + "d\030\002 \001(\t\022\016\n\006filter\030\003 \001(\t\022\021\n\tpage_size\030\004 \001"
    +          + "(\005\022\022\n\npage_token\030\005 \001(\t\"\'\n\022GetOperationAc"
    +          + "tion\022\021\n\toperation\030\001 \001(\t\"I\n\027QueryCancella"
    +          + "tionAction\022\030\n\020long_running_sql\030\001 \001(\t\022\024\n\014"
    +          + "cancel_query\030\002 \001(\t\"*\n\025CancelOperationAct"
    +          + "ion\022\021\n\toperation\030\001 \001(\t\"\210\001\n\033StartBatchTra"
    +          + "nsactionAction\0224\n\016batch_txn_time\030\001 \001(\0132\032"
    +          + ".google.protobuf.TimestampH\000\022\r\n\003tid\030\002 \001("
    +          + "\014H\000\022\033\n\023cloud_database_role\030\003 \001(\tB\007\n\005para"
    +          + "m\".\n\033CloseBatchTransactionAction\022\017\n\007clea"
    +          + "nup\030\001 \001(\010\"\227\002\n!GenerateDbPartitionsForRea"
    +          + "dAction\0224\n\004read\030\001 \001(\0132&.google.spanner.e"
    +          + "xecutor.v1.ReadAction\0228\n\005table\030\002 \003(\0132).g"
    +          + "oogle.spanner.executor.v1.TableMetadata\022"
    +          + "(\n\033desired_bytes_per_partition\030\003 \001(\003H\000\210\001"
    +          + "\001\022 \n\023max_partition_count\030\004 \001(\003H\001\210\001\001B\036\n\034_"
    +          + "desired_bytes_per_partitionB\026\n\024_max_part"
    +          + "ition_count\"\246\001\n\"GenerateDbPartitionsForQ"
    +          + "ueryAction\0226\n\005query\030\001 \001(\0132\'.google.spann"
    +          + "er.executor.v1.QueryAction\022(\n\033desired_by"
    +          + "tes_per_partition\030\002 \001(\003H\000\210\001\001B\036\n\034_desired"
    +          + "_bytes_per_partition\"x\n\016BatchPartition\022\021"
    +          + "\n\tpartition\030\001 \001(\014\022\027\n\017partition_token\030\002 \001"
    +          + "(\014\022\022\n\005table\030\003 \001(\tH\000\210\001\001\022\022\n\005index\030\004 \001(\tH\001\210"
    +          + "\001\001B\010\n\006_tableB\010\n\006_index\"W\n\026ExecutePartiti"
    +          + "onAction\022=\n\tpartition\030\001 \001(\0132*.google.spa"
    +          + "nner.executor.v1.BatchPartition\"\216\003\n\030Exec"
    +          + "uteChangeStreamQuery\022\014\n\004name\030\001 \001(\t\022.\n\nst"
    +          + "art_time\030\002 \001(\0132\032.google.protobuf.Timesta"
    +          + "mp\0221\n\010end_time\030\003 \001(\0132\032.google.protobuf.T"
    +          + "imestampH\000\210\001\001\022\034\n\017partition_token\030\004 \001(\tH\001"
    +          + "\210\001\001\022\024\n\014read_options\030\005 \003(\t\022#\n\026heartbeat_m"
    +          + "illiseconds\030\006 \001(\005H\002\210\001\001\022\035\n\020deadline_secon"
    +          + "ds\030\007 \001(\003H\003\210\001\001\022 \n\023cloud_database_role\030\010 \001"
    +          + "(\tH\004\210\001\001B\013\n\t_end_timeB\022\n\020_partition_token"
    +          + "B\031\n\027_heartbeat_millisecondsB\023\n\021_deadline"
    +          + "_secondsB\026\n\024_cloud_database_role\"\242\005\n\024Spa"
    +          + "nnerActionOutcome\022\'\n\006status\030\001 \001(\0132\022.goog"
    +          + "le.rpc.StatusH\000\210\001\001\0224\n\013commit_time\030\002 \001(\0132"
    +          + "\032.google.protobuf.TimestampH\001\210\001\001\022@\n\013read"
    +          + "_result\030\003 \001(\0132&.google.spanner.executor."
    +          + "v1.ReadResultH\002\210\001\001\022B\n\014query_result\030\004 \001(\013"
    +          + "2\'.google.spanner.executor.v1.QueryResul"
    +          + "tH\003\210\001\001\022\"\n\025transaction_restarted\030\005 \001(\010H\004\210"
    +          + "\001\001\022\031\n\014batch_txn_id\030\006 \001(\014H\005\210\001\001\022@\n\014db_part"
    +          + "ition\030\007 \003(\0132*.google.spanner.executor.v1"
    +          + ".BatchPartition\022B\n\014admin_result\030\010 \001(\0132\'."
    +          + "google.spanner.executor.v1.AdminResultH\006"
    +          + "\210\001\001\022\031\n\021dml_rows_modified\030\t \003(\003\022M\n\025change"
    +          + "_stream_records\030\n \003(\0132..google.spanner.e"
    +          + "xecutor.v1.ChangeStreamRecordB\t\n\007_status"
    +          + "B\016\n\014_commit_timeB\016\n\014_read_resultB\017\n\r_que"
    +          + "ry_resultB\030\n\026_transaction_restartedB\017\n\r_"
    +          + "batch_txn_idB\017\n\r_admin_result\"\231\003\n\013AdminR"
    +          + "esult\022H\n\017backup_response\030\001 \001(\0132/.google."
    +          + "spanner.executor.v1.CloudBackupResponse\022"
    +          + "I\n\022operation_response\030\002 \001(\0132-.google.spa"
    +          + "nner.executor.v1.OperationResponse\022L\n\021da"
    +          + "tabase_response\030\003 \001(\01321.google.spanner.e"
    +          + "xecutor.v1.CloudDatabaseResponse\022L\n\021inst"
    +          + "ance_response\030\004 \001(\01321.google.spanner.exe"
    +          + "cutor.v1.CloudInstanceResponse\022Y\n\030instan"
    +          + "ce_config_response\030\005 \001(\01327.google.spanne"
    +          + "r.executor.v1.CloudInstanceConfigRespons"
    +          + "e\"\353\001\n\023CloudBackupResponse\022@\n\016listed_back"
    +          + "ups\030\001 \003(\0132(.google.spanner.admin.databas"
    +          + "e.v1.Backup\022?\n\030listed_backup_operations\030"
    +          + "\002 \003(\0132\035.google.longrunning.Operation\022\027\n\017"
    +          + "next_page_token\030\003 \001(\t\0228\n\006backup\030\004 \001(\0132(."
    +          + "google.spanner.admin.database.v1.Backup\""
    +          + "\230\001\n\021OperationResponse\0228\n\021listed_operatio"
    +          + "ns\030\001 \003(\0132\035.google.longrunning.Operation\022"
    +          + "\027\n\017next_page_token\030\002 \001(\t\0220\n\toperation\030\003 "
    +          + "\001(\0132\035.google.longrunning.Operation\"\264\001\n\025C"
    +          + "loudInstanceResponse\022D\n\020listed_instances"
    +          + "\030\001 \003(\0132*.google.spanner.admin.instance.v"
    +          + "1.Instance\022\027\n\017next_page_token\030\002 \001(\t\022<\n\010i"
    +          + "nstance\030\003 \001(\0132*.google.spanner.admin.ins"
    +          + "tance.v1.Instance\"\324\001\n\033CloudInstanceConfi"
    +          + "gResponse\022Q\n\027listed_instance_configs\030\001 \003"
    +          + "(\01320.google.spanner.admin.instance.v1.In"
    +          + "stanceConfig\022\027\n\017next_page_token\030\002 \001(\t\022I\n"
    +          + "\017instance_config\030\003 \001(\01320.google.spanner."
    +          + "admin.instance.v1.InstanceConfig\"\367\001\n\025Clo"
    +          + "udDatabaseResponse\022D\n\020listed_databases\030\001"
    +          + " \003(\0132*.google.spanner.admin.database.v1.",
    +      "Database\022A\n\032listed_database_operations\030\002"
    +          + " \003(\0132\035.google.longrunning.Operation\022\027\n\017n"
    +          + "ext_page_token\030\003 \001(\t\022<\n\010database\030\004 \001(\0132*"
    +          + ".google.spanner.admin.database.v1.Databa"
    +          + "se\"\336\001\n\nReadResult\022\r\n\005table\030\001 \001(\t\022\022\n\005inde"
    +          + "x\030\002 \001(\tH\000\210\001\001\022\032\n\rrequest_index\030\003 \001(\005H\001\210\001\001"
    +          + "\0222\n\003row\030\004 \003(\0132%.google.spanner.executor."
    +          + "v1.ValueList\0224\n\010row_type\030\005 \001(\0132\035.google."
    +          + "spanner.v1.StructTypeH\002\210\001\001B\010\n\006_indexB\020\n\016"
    +          + "_request_indexB\013\n\t_row_type\"\204\001\n\013QueryRes"
    +          + "ult\0222\n\003row\030\001 \003(\0132%.google.spanner.execut"
    +          + "or.v1.ValueList\0224\n\010row_type\030\002 \001(\0132\035.goog"
    +          + "le.spanner.v1.StructTypeH\000\210\001\001B\013\n\t_row_ty"
    +          + "pe\"\363\001\n\022ChangeStreamRecord\022C\n\013data_change"
    +          + "\030\001 \001(\0132,.google.spanner.executor.v1.Data"
    +          + "ChangeRecordH\000\022L\n\017child_partition\030\002 \001(\0132"
    +          + "1.google.spanner.executor.v1.ChildPartit"
    +          + "ionsRecordH\000\022@\n\theartbeat\030\003 \001(\0132+.google"
    +          + ".spanner.executor.v1.HeartbeatRecordH\000B\010"
    +          + "\n\006record\"\330\004\n\020DataChangeRecord\022/\n\013commit_"
    +          + "time\030\001 \001(\0132\032.google.protobuf.Timestamp\022\027"
    +          + "\n\017record_sequence\030\002 \001(\t\022\026\n\016transaction_i"
    +          + "d\030\003 \001(\t\022\026\n\016is_last_record\030\004 \001(\010\022\r\n\005table"
    +          + "\030\005 \001(\t\022M\n\014column_types\030\006 \003(\01327.google.sp"
    +          + "anner.executor.v1.DataChangeRecord.Colum"
    +          + "nType\022>\n\004mods\030\007 \003(\01320.google.spanner.exe"
    +          + "cutor.v1.DataChangeRecord.Mod\022\020\n\010mod_typ"
    +          + "e\030\010 \001(\t\022\032\n\022value_capture_type\030\t \001(\t\022\024\n\014r"
    +          + "ecord_count\030\n \001(\003\022\027\n\017partition_count\030\013 \001"
    +          + "(\003\022\027\n\017transaction_tag\030\014 \001(\t\022\035\n\025is_system"
    +          + "_transaction\030\r \001(\010\032Z\n\nColumnType\022\014\n\004name"
    +          + "\030\001 \001(\t\022\014\n\004type\030\002 \001(\t\022\026\n\016is_primary_key\030\003"
    +          + " \001(\010\022\030\n\020ordinal_position\030\004 \001(\003\032;\n\003Mod\022\014\n"
    +          + "\004keys\030\001 \001(\t\022\022\n\nnew_values\030\002 \001(\t\022\022\n\nold_v"
    +          + "alues\030\003 \001(\t\"\376\001\n\025ChildPartitionsRecord\022.\n"
    +          + "\nstart_time\030\001 \001(\0132\032.google.protobuf.Time"
    +          + "stamp\022\027\n\017record_sequence\030\002 \001(\t\022Z\n\020child_"
    +          + "partitions\030\003 \003(\0132@.google.spanner.execut"
    +          + "or.v1.ChildPartitionsRecord.ChildPartiti"
    +          + "on\032@\n\016ChildPartition\022\r\n\005token\030\001 \001(\t\022\037\n\027p"
    +          + "arent_partition_tokens\030\002 \003(\t\"E\n\017Heartbea"
    +          + "tRecord\0222\n\016heartbeat_time\030\001 \001(\0132\032.google"
    +          + ".protobuf.Timestamp\"^\n\016SpannerOptions\022L\n"
    +          + "\024session_pool_options\030\001 \001(\0132..google.spa"
    +          + "nner.executor.v1.SessionPoolOptions\"-\n\022S"
    +          + "essionPoolOptions\022\027\n\017use_multiplexed\030\001 \001"
    +          + "(\0102\314\001\n\024SpannerExecutorProxy\022\211\001\n\022ExecuteA"
    +          + "ctionAsync\0225.google.spanner.executor.v1."
    +          + "SpannerAsyncActionRequest\0326.google.spann"
    +          + "er.executor.v1.SpannerAsyncActionRespons"
    +          + "e\"\000(\0010\001\032(\312A%spanner-cloud-executor.googl"
    +          + "eapis.comBx\n\036com.google.spanner.executor"
    +          + ".v1B\022CloudExecutorProtoP\001Z@cloud.google."
    +          + "com/go/spanner/executor/apiv1/executorpb"
    +          + ";executorpbb\006proto3"
         };
         descriptor =
             com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
    @@ -873,6 +881,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "GenerateDbPartitionsQuery",
                   "ExecutePartition",
                   "ExecuteChangeStreamQuery",
    +              "QueryCancellation",
                   "Action",
                 });
         internal_static_google_spanner_executor_v1_ReadAction_descriptor =
    @@ -1396,8 +1405,16 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                 new java.lang.String[] {
                   "Operation",
                 });
    -    internal_static_google_spanner_executor_v1_CancelOperationAction_descriptor =
    +    internal_static_google_spanner_executor_v1_QueryCancellationAction_descriptor =
             getDescriptor().getMessageTypes().get(48);
    +    internal_static_google_spanner_executor_v1_QueryCancellationAction_fieldAccessorTable =
    +        new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
    +            internal_static_google_spanner_executor_v1_QueryCancellationAction_descriptor,
    +            new java.lang.String[] {
    +              "LongRunningSql", "CancelQuery",
    +            });
    +    internal_static_google_spanner_executor_v1_CancelOperationAction_descriptor =
    +        getDescriptor().getMessageTypes().get(49);
         internal_static_google_spanner_executor_v1_CancelOperationAction_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_CancelOperationAction_descriptor,
    @@ -1405,7 +1422,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "Operation",
                 });
         internal_static_google_spanner_executor_v1_StartBatchTransactionAction_descriptor =
    -        getDescriptor().getMessageTypes().get(49);
    +        getDescriptor().getMessageTypes().get(50);
         internal_static_google_spanner_executor_v1_StartBatchTransactionAction_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_StartBatchTransactionAction_descriptor,
    @@ -1413,7 +1430,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "BatchTxnTime", "Tid", "CloudDatabaseRole", "Param",
                 });
         internal_static_google_spanner_executor_v1_CloseBatchTransactionAction_descriptor =
    -        getDescriptor().getMessageTypes().get(50);
    +        getDescriptor().getMessageTypes().get(51);
         internal_static_google_spanner_executor_v1_CloseBatchTransactionAction_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_CloseBatchTransactionAction_descriptor,
    @@ -1421,7 +1438,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "Cleanup",
                 });
         internal_static_google_spanner_executor_v1_GenerateDbPartitionsForReadAction_descriptor =
    -        getDescriptor().getMessageTypes().get(51);
    +        getDescriptor().getMessageTypes().get(52);
         internal_static_google_spanner_executor_v1_GenerateDbPartitionsForReadAction_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_GenerateDbPartitionsForReadAction_descriptor,
    @@ -1429,7 +1446,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "Read", "Table", "DesiredBytesPerPartition", "MaxPartitionCount",
                 });
         internal_static_google_spanner_executor_v1_GenerateDbPartitionsForQueryAction_descriptor =
    -        getDescriptor().getMessageTypes().get(52);
    +        getDescriptor().getMessageTypes().get(53);
         internal_static_google_spanner_executor_v1_GenerateDbPartitionsForQueryAction_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_GenerateDbPartitionsForQueryAction_descriptor,
    @@ -1437,7 +1454,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "Query", "DesiredBytesPerPartition",
                 });
         internal_static_google_spanner_executor_v1_BatchPartition_descriptor =
    -        getDescriptor().getMessageTypes().get(53);
    +        getDescriptor().getMessageTypes().get(54);
         internal_static_google_spanner_executor_v1_BatchPartition_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_BatchPartition_descriptor,
    @@ -1445,7 +1462,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "Partition", "PartitionToken", "Table", "Index",
                 });
         internal_static_google_spanner_executor_v1_ExecutePartitionAction_descriptor =
    -        getDescriptor().getMessageTypes().get(54);
    +        getDescriptor().getMessageTypes().get(55);
         internal_static_google_spanner_executor_v1_ExecutePartitionAction_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_ExecutePartitionAction_descriptor,
    @@ -1453,7 +1470,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "Partition",
                 });
         internal_static_google_spanner_executor_v1_ExecuteChangeStreamQuery_descriptor =
    -        getDescriptor().getMessageTypes().get(55);
    +        getDescriptor().getMessageTypes().get(56);
         internal_static_google_spanner_executor_v1_ExecuteChangeStreamQuery_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_ExecuteChangeStreamQuery_descriptor,
    @@ -1468,7 +1485,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "CloudDatabaseRole",
                 });
         internal_static_google_spanner_executor_v1_SpannerActionOutcome_descriptor =
    -        getDescriptor().getMessageTypes().get(56);
    +        getDescriptor().getMessageTypes().get(57);
         internal_static_google_spanner_executor_v1_SpannerActionOutcome_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_SpannerActionOutcome_descriptor,
    @@ -1485,7 +1502,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "ChangeStreamRecords",
                 });
         internal_static_google_spanner_executor_v1_AdminResult_descriptor =
    -        getDescriptor().getMessageTypes().get(57);
    +        getDescriptor().getMessageTypes().get(58);
         internal_static_google_spanner_executor_v1_AdminResult_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_AdminResult_descriptor,
    @@ -1497,7 +1514,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "InstanceConfigResponse",
                 });
         internal_static_google_spanner_executor_v1_CloudBackupResponse_descriptor =
    -        getDescriptor().getMessageTypes().get(58);
    +        getDescriptor().getMessageTypes().get(59);
         internal_static_google_spanner_executor_v1_CloudBackupResponse_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_CloudBackupResponse_descriptor,
    @@ -1505,7 +1522,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "ListedBackups", "ListedBackupOperations", "NextPageToken", "Backup",
                 });
         internal_static_google_spanner_executor_v1_OperationResponse_descriptor =
    -        getDescriptor().getMessageTypes().get(59);
    +        getDescriptor().getMessageTypes().get(60);
         internal_static_google_spanner_executor_v1_OperationResponse_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_OperationResponse_descriptor,
    @@ -1513,7 +1530,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "ListedOperations", "NextPageToken", "Operation",
                 });
         internal_static_google_spanner_executor_v1_CloudInstanceResponse_descriptor =
    -        getDescriptor().getMessageTypes().get(60);
    +        getDescriptor().getMessageTypes().get(61);
         internal_static_google_spanner_executor_v1_CloudInstanceResponse_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_CloudInstanceResponse_descriptor,
    @@ -1521,7 +1538,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "ListedInstances", "NextPageToken", "Instance",
                 });
         internal_static_google_spanner_executor_v1_CloudInstanceConfigResponse_descriptor =
    -        getDescriptor().getMessageTypes().get(61);
    +        getDescriptor().getMessageTypes().get(62);
         internal_static_google_spanner_executor_v1_CloudInstanceConfigResponse_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_CloudInstanceConfigResponse_descriptor,
    @@ -1529,7 +1546,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "ListedInstanceConfigs", "NextPageToken", "InstanceConfig",
                 });
         internal_static_google_spanner_executor_v1_CloudDatabaseResponse_descriptor =
    -        getDescriptor().getMessageTypes().get(62);
    +        getDescriptor().getMessageTypes().get(63);
         internal_static_google_spanner_executor_v1_CloudDatabaseResponse_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_CloudDatabaseResponse_descriptor,
    @@ -1537,7 +1554,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "ListedDatabases", "ListedDatabaseOperations", "NextPageToken", "Database",
                 });
         internal_static_google_spanner_executor_v1_ReadResult_descriptor =
    -        getDescriptor().getMessageTypes().get(63);
    +        getDescriptor().getMessageTypes().get(64);
         internal_static_google_spanner_executor_v1_ReadResult_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_ReadResult_descriptor,
    @@ -1545,7 +1562,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "Table", "Index", "RequestIndex", "Row", "RowType",
                 });
         internal_static_google_spanner_executor_v1_QueryResult_descriptor =
    -        getDescriptor().getMessageTypes().get(64);
    +        getDescriptor().getMessageTypes().get(65);
         internal_static_google_spanner_executor_v1_QueryResult_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_QueryResult_descriptor,
    @@ -1553,7 +1570,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "Row", "RowType",
                 });
         internal_static_google_spanner_executor_v1_ChangeStreamRecord_descriptor =
    -        getDescriptor().getMessageTypes().get(65);
    +        getDescriptor().getMessageTypes().get(66);
         internal_static_google_spanner_executor_v1_ChangeStreamRecord_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_ChangeStreamRecord_descriptor,
    @@ -1561,7 +1578,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "DataChange", "ChildPartition", "Heartbeat", "Record",
                 });
         internal_static_google_spanner_executor_v1_DataChangeRecord_descriptor =
    -        getDescriptor().getMessageTypes().get(66);
    +        getDescriptor().getMessageTypes().get(67);
         internal_static_google_spanner_executor_v1_DataChangeRecord_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_DataChangeRecord_descriptor,
    @@ -1601,7 +1618,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "Keys", "NewValues", "OldValues",
                 });
         internal_static_google_spanner_executor_v1_ChildPartitionsRecord_descriptor =
    -        getDescriptor().getMessageTypes().get(67);
    +        getDescriptor().getMessageTypes().get(68);
         internal_static_google_spanner_executor_v1_ChildPartitionsRecord_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_ChildPartitionsRecord_descriptor,
    @@ -1619,7 +1636,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "Token", "ParentPartitionTokens",
                 });
         internal_static_google_spanner_executor_v1_HeartbeatRecord_descriptor =
    -        getDescriptor().getMessageTypes().get(68);
    +        getDescriptor().getMessageTypes().get(69);
         internal_static_google_spanner_executor_v1_HeartbeatRecord_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_HeartbeatRecord_descriptor,
    @@ -1627,7 +1644,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "HeartbeatTime",
                 });
         internal_static_google_spanner_executor_v1_SpannerOptions_descriptor =
    -        getDescriptor().getMessageTypes().get(69);
    +        getDescriptor().getMessageTypes().get(70);
         internal_static_google_spanner_executor_v1_SpannerOptions_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_SpannerOptions_descriptor,
    @@ -1635,7 +1652,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
                   "SessionPoolOptions",
                 });
         internal_static_google_spanner_executor_v1_SessionPoolOptions_descriptor =
    -        getDescriptor().getMessageTypes().get(70);
    +        getDescriptor().getMessageTypes().get(71);
         internal_static_google_spanner_executor_v1_SessionPoolOptions_fieldAccessorTable =
             new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
                 internal_static_google_spanner_executor_v1_SessionPoolOptions_descriptor,
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceConfigResponse.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceConfigResponse.java
    index f84ebefb39f..67190d5fb20 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceConfigResponse.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceConfigResponse.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceConfigResponseOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceConfigResponseOrBuilder.java
    index 32eb9e0ce3a..4d390a8c193 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceConfigResponseOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceConfigResponseOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface CloudInstanceConfigResponseOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceResponse.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceResponse.java
    index 0b22ffab5fc..5daa23c11dc 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceResponse.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceResponse.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceResponseOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceResponseOrBuilder.java
    index f2abf624be7..8743f534a55 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceResponseOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CloudInstanceResponseOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface CloudInstanceResponseOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ColumnMetadata.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ColumnMetadata.java
    index 5829ad8ca0a..efa4897ed23 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ColumnMetadata.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ColumnMetadata.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ColumnMetadataOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ColumnMetadataOrBuilder.java
    index a6abcdcee5a..49102eda637 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ColumnMetadataOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ColumnMetadataOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface ColumnMetadataOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/Concurrency.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/Concurrency.java
    index d6a2b8aad4f..9adeb54dee2 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/Concurrency.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/Concurrency.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ConcurrencyOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ConcurrencyOrBuilder.java
    index ac0ba53cef1..0ded5469c65 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ConcurrencyOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ConcurrencyOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface ConcurrencyOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CopyCloudBackupAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CopyCloudBackupAction.java
    index d5cdd80a3f3..dcf46e36608 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CopyCloudBackupAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CopyCloudBackupAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CopyCloudBackupActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CopyCloudBackupActionOrBuilder.java
    index 762201fca48..0d41bf0e713 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CopyCloudBackupActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CopyCloudBackupActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface CopyCloudBackupActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudBackupAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudBackupAction.java
    index 9717388a0b0..a1ccd28f481 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudBackupAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudBackupAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudBackupActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudBackupActionOrBuilder.java
    index 54bde8e017e..16396ec7031 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudBackupActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudBackupActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface CreateCloudBackupActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudDatabaseAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudDatabaseAction.java
    index d53d8e768cb..8bea5889e35 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudDatabaseAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudDatabaseAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudDatabaseActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudDatabaseActionOrBuilder.java
    index ecfd9d2680f..497b65feea1 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudDatabaseActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudDatabaseActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface CreateCloudDatabaseActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudInstanceAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudInstanceAction.java
    index fcbfb40081f..72cd94889bf 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudInstanceAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudInstanceAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudInstanceActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudInstanceActionOrBuilder.java
    index d41976b7090..cab30a9d925 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudInstanceActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateCloudInstanceActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface CreateCloudInstanceActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateUserInstanceConfigAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateUserInstanceConfigAction.java
    index 968fe00db0e..f81a3df41bc 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateUserInstanceConfigAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateUserInstanceConfigAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateUserInstanceConfigActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateUserInstanceConfigActionOrBuilder.java
    index 4915cf17e17..74cb8b47b12 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateUserInstanceConfigActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/CreateUserInstanceConfigActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface CreateUserInstanceConfigActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DataChangeRecord.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DataChangeRecord.java
    index 125085ef4b1..f8fcada48a4 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DataChangeRecord.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DataChangeRecord.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DataChangeRecordOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DataChangeRecordOrBuilder.java
    index 5f4a47d2405..c140d3a70d8 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DataChangeRecordOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DataChangeRecordOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface DataChangeRecordOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudBackupAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudBackupAction.java
    index e1467151cda..84dd934f78e 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudBackupAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudBackupAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudBackupActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudBackupActionOrBuilder.java
    index e0a3faa8885..08f6225aafc 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudBackupActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudBackupActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface DeleteCloudBackupActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudInstanceAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudInstanceAction.java
    index d8a043b6307..f295a72ab96 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudInstanceAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudInstanceAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudInstanceActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudInstanceActionOrBuilder.java
    index 77e82d97145..a5fab50c050 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudInstanceActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteCloudInstanceActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface DeleteCloudInstanceActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteUserInstanceConfigAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteUserInstanceConfigAction.java
    index dd0c7e8a15b..b3572a35abc 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteUserInstanceConfigAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteUserInstanceConfigAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteUserInstanceConfigActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteUserInstanceConfigActionOrBuilder.java
    index 9f41eacbd5f..5ba94831da6 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteUserInstanceConfigActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DeleteUserInstanceConfigActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface DeleteUserInstanceConfigActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DmlAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DmlAction.java
    index b2f96c44da4..8b0f8a21382 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DmlAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DmlAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DmlActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DmlActionOrBuilder.java
    index dea8178f498..a612bd0367e 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DmlActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DmlActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface DmlActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DropCloudDatabaseAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DropCloudDatabaseAction.java
    index bd9a1c8c613..aa5b68d1ffb 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DropCloudDatabaseAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DropCloudDatabaseAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DropCloudDatabaseActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DropCloudDatabaseActionOrBuilder.java
    index cc64a9047f9..c0cf24374b5 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DropCloudDatabaseActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/DropCloudDatabaseActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface DropCloudDatabaseActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecuteChangeStreamQuery.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecuteChangeStreamQuery.java
    index fc45647de56..35bc4936dbb 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecuteChangeStreamQuery.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecuteChangeStreamQuery.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecuteChangeStreamQueryOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecuteChangeStreamQueryOrBuilder.java
    index e3fccae9dcd..afc2fca4a3d 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecuteChangeStreamQueryOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecuteChangeStreamQueryOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface ExecuteChangeStreamQueryOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecutePartitionAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecutePartitionAction.java
    index b5a816faeb5..f69dc662c6c 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecutePartitionAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecutePartitionAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecutePartitionActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecutePartitionActionOrBuilder.java
    index e7ffff97df8..2dba093d25d 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecutePartitionActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ExecutePartitionActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface ExecutePartitionActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/FinishTransactionAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/FinishTransactionAction.java
    index 452a837de82..49796934a5d 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/FinishTransactionAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/FinishTransactionAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/FinishTransactionActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/FinishTransactionActionOrBuilder.java
    index f4b5311a7ef..195482c8757 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/FinishTransactionActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/FinishTransactionActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface FinishTransactionActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForQueryAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForQueryAction.java
    index 3707bc8f1c8..88fd4928884 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForQueryAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForQueryAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForQueryActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForQueryActionOrBuilder.java
    index 49cceebe66b..08be7723894 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForQueryActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForQueryActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface GenerateDbPartitionsForQueryActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForReadAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForReadAction.java
    index 9a8017480c7..6e9aa3e92d0 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForReadAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForReadAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForReadActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForReadActionOrBuilder.java
    index 0a7998e20fa..e059e024903 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForReadActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GenerateDbPartitionsForReadActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface GenerateDbPartitionsForReadActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudBackupAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudBackupAction.java
    index 3faf03c3fda..cc7a3d665f4 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudBackupAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudBackupAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudBackupActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudBackupActionOrBuilder.java
    index 0ae7dc25cd1..5f382c06704 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudBackupActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudBackupActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface GetCloudBackupActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudDatabaseAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudDatabaseAction.java
    index 9dac07ff304..383448c669a 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudDatabaseAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudDatabaseAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudDatabaseActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudDatabaseActionOrBuilder.java
    index c271361fb9a..192d3bc6b49 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudDatabaseActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudDatabaseActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface GetCloudDatabaseActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceAction.java
    index 00968361403..8c047998222 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceActionOrBuilder.java
    index 1169fb7ce92..15cffa46487 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface GetCloudInstanceActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceConfigAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceConfigAction.java
    index 27702feeef9..671253eefd9 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceConfigAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceConfigAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceConfigActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceConfigActionOrBuilder.java
    index e3d147dff93..c6710d56a25 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceConfigActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetCloudInstanceConfigActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface GetCloudInstanceConfigActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetOperationAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetOperationAction.java
    index b96332f66e8..527fc85c710 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetOperationAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetOperationAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetOperationActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetOperationActionOrBuilder.java
    index 3aa3a7f8a36..f08e868df4c 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetOperationActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/GetOperationActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface GetOperationActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/HeartbeatRecord.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/HeartbeatRecord.java
    index ff5e350ed2d..ce195909568 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/HeartbeatRecord.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/HeartbeatRecord.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/HeartbeatRecordOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/HeartbeatRecordOrBuilder.java
    index e75042b97c1..f664ff2818e 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/HeartbeatRecordOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/HeartbeatRecordOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface HeartbeatRecordOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeyRange.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeyRange.java
    index 69e049bb12d..6b3060318b7 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeyRange.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeyRange.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeyRangeOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeyRangeOrBuilder.java
    index 3f798a14a61..f2113b669ac 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeyRangeOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeyRangeOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface KeyRangeOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeySet.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeySet.java
    index 7cb96d5af45..5f1f5b70b4f 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeySet.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeySet.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeySetOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeySetOrBuilder.java
    index 8bca1309e00..b3d98222c13 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeySetOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/KeySetOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface KeySetOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupOperationsAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupOperationsAction.java
    index f41d7003288..6057e6c203b 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupOperationsAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupOperationsAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupOperationsActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupOperationsActionOrBuilder.java
    index a9204447fa5..a57baa78b1c 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupOperationsActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupOperationsActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface ListCloudBackupOperationsActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupsAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupsAction.java
    index f7c78646b8a..e03d2436472 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupsAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupsAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupsActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupsActionOrBuilder.java
    index 5aa5b07944f..91997a02545 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupsActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudBackupsActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface ListCloudBackupsActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabaseOperationsAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabaseOperationsAction.java
    index 1c242d29abb..8ae117cb752 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabaseOperationsAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabaseOperationsAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabaseOperationsActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabaseOperationsActionOrBuilder.java
    index 72939dd07fb..0545a14b32a 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabaseOperationsActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabaseOperationsActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface ListCloudDatabaseOperationsActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabasesAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabasesAction.java
    index 684b3c8859e..f4d3638279f 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabasesAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabasesAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabasesActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabasesActionOrBuilder.java
    index 95716e95f2b..81092b3e290 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabasesActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudDatabasesActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface ListCloudDatabasesActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstanceConfigsAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstanceConfigsAction.java
    index c755e74db73..0e3b56796f2 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstanceConfigsAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstanceConfigsAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstanceConfigsActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstanceConfigsActionOrBuilder.java
    index 493d06263ea..59dc08f669a 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstanceConfigsActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstanceConfigsActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface ListCloudInstanceConfigsActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstancesAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstancesAction.java
    index 45c89ff81fe..4c68c112e03 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstancesAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstancesAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstancesActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstancesActionOrBuilder.java
    index b225d0011ba..d095177d649 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstancesActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ListCloudInstancesActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface ListCloudInstancesActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/MutationAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/MutationAction.java
    index 67243fa243b..d2b4b02aea3 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/MutationAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/MutationAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/MutationActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/MutationActionOrBuilder.java
    index 01298f4fd85..fc5aec3be20 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/MutationActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/MutationActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface MutationActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/OperationResponse.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/OperationResponse.java
    index 9493d21f4d5..08d25a8733b 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/OperationResponse.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/OperationResponse.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/OperationResponseOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/OperationResponseOrBuilder.java
    index 2a4b0cc7458..f92c1a39701 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/OperationResponseOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/OperationResponseOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface OperationResponseOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/PartitionedUpdateAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/PartitionedUpdateAction.java
    index 066287b7869..8d6598ee5c2 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/PartitionedUpdateAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/PartitionedUpdateAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/PartitionedUpdateActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/PartitionedUpdateActionOrBuilder.java
    index c170776547f..d825815586b 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/PartitionedUpdateActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/PartitionedUpdateActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface PartitionedUpdateActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryAction.java
    index 470688d2626..f392894a231 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryAction.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryAction.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     /**
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryActionOrBuilder.java
    index 2358e8cc43a..271f50bdadf 100644
    --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryActionOrBuilder.java
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryActionOrBuilder.java
    @@ -16,7 +16,7 @@
     // Generated by the protocol buffer compiler.  DO NOT EDIT!
     // source: google/spanner/executor/v1/cloud_executor.proto
     
    -// Protobuf Java Version: 3.25.3
    +// Protobuf Java Version: 3.25.5
     package com.google.spanner.executor.v1;
     
     public interface QueryActionOrBuilder
    diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryCancellationAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryCancellationAction.java
    new file mode 100644
    index 00000000000..62cdb66e8f7
    --- /dev/null
    +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryCancellationAction.java
    @@ -0,0 +1,810 @@
    +/*
    + * Copyright 2024 Google LLC
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     https://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +// Generated by the protocol buffer compiler.  DO NOT EDIT!
    +// source: google/spanner/executor/v1/cloud_executor.proto
    +
    +// Protobuf Java Version: 3.25.5
    +package com.google.spanner.executor.v1;
    +
    +/**
    + *
    + *
    + * 
    + * Query cancellation action defines the long running query and the cancel query
    + * format depening on the Cloud database dialect.
    + * 
    + * + * Protobuf type {@code google.spanner.executor.v1.QueryCancellationAction} + */ +public final class QueryCancellationAction extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.executor.v1.QueryCancellationAction) + QueryCancellationActionOrBuilder { + private static final long serialVersionUID = 0L; + // Use QueryCancellationAction.newBuilder() to construct. + private QueryCancellationAction(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private QueryCancellationAction() { + longRunningSql_ = ""; + cancelQuery_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new QueryCancellationAction(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryCancellationAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryCancellationAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.QueryCancellationAction.class, + com.google.spanner.executor.v1.QueryCancellationAction.Builder.class); + } + + public static final int LONG_RUNNING_SQL_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object longRunningSql_ = ""; + /** + * + * + *
    +   * Long running query.
    +   * 
    + * + * string long_running_sql = 1; + * + * @return The longRunningSql. + */ + @java.lang.Override + public java.lang.String getLongRunningSql() { + java.lang.Object ref = longRunningSql_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + longRunningSql_ = s; + return s; + } + } + /** + * + * + *
    +   * Long running query.
    +   * 
    + * + * string long_running_sql = 1; + * + * @return The bytes for longRunningSql. + */ + @java.lang.Override + public com.google.protobuf.ByteString getLongRunningSqlBytes() { + java.lang.Object ref = longRunningSql_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + longRunningSql_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int CANCEL_QUERY_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object cancelQuery_ = ""; + /** + * + * + *
    +   * Format of the cancel query for the cloud database dialect.
    +   * 
    + * + * string cancel_query = 2; + * + * @return The cancelQuery. + */ + @java.lang.Override + public java.lang.String getCancelQuery() { + java.lang.Object ref = cancelQuery_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + cancelQuery_ = s; + return s; + } + } + /** + * + * + *
    +   * Format of the cancel query for the cloud database dialect.
    +   * 
    + * + * string cancel_query = 2; + * + * @return The bytes for cancelQuery. + */ + @java.lang.Override + public com.google.protobuf.ByteString getCancelQueryBytes() { + java.lang.Object ref = cancelQuery_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + cancelQuery_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(longRunningSql_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, longRunningSql_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(cancelQuery_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, cancelQuery_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(longRunningSql_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, longRunningSql_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(cancelQuery_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, cancelQuery_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.executor.v1.QueryCancellationAction)) { + 
return super.equals(obj); + } + com.google.spanner.executor.v1.QueryCancellationAction other = + (com.google.spanner.executor.v1.QueryCancellationAction) obj; + + if (!getLongRunningSql().equals(other.getLongRunningSql())) return false; + if (!getCancelQuery().equals(other.getCancelQuery())) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + LONG_RUNNING_SQL_FIELD_NUMBER; + hash = (53 * hash) + getLongRunningSql().hashCode(); + hash = (37 * hash) + CANCEL_QUERY_FIELD_NUMBER; + hash = (53 * hash) + getCancelQuery().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException 
{ + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.executor.v1.QueryCancellationAction prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * Query cancellation action defines the long running query and the cancel query
    +   * format depening on the Cloud database dialect.
    +   * 
    + * + * Protobuf type {@code google.spanner.executor.v1.QueryCancellationAction} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.executor.v1.QueryCancellationAction) + com.google.spanner.executor.v1.QueryCancellationActionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryCancellationAction_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryCancellationAction_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.executor.v1.QueryCancellationAction.class, + com.google.spanner.executor.v1.QueryCancellationAction.Builder.class); + } + + // Construct using com.google.spanner.executor.v1.QueryCancellationAction.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + longRunningSql_ = ""; + cancelQuery_ = ""; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.executor.v1.CloudExecutorProto + .internal_static_google_spanner_executor_v1_QueryCancellationAction_descriptor; + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationAction getDefaultInstanceForType() { + return com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationAction build() { + 
com.google.spanner.executor.v1.QueryCancellationAction result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationAction buildPartial() { + com.google.spanner.executor.v1.QueryCancellationAction result = + new com.google.spanner.executor.v1.QueryCancellationAction(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.executor.v1.QueryCancellationAction result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.longRunningSql_ = longRunningSql_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.cancelQuery_ = cancelQuery_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.executor.v1.QueryCancellationAction) { + return 
mergeFrom((com.google.spanner.executor.v1.QueryCancellationAction) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.executor.v1.QueryCancellationAction other) { + if (other == com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance()) + return this; + if (!other.getLongRunningSql().isEmpty()) { + longRunningSql_ = other.longRunningSql_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getCancelQuery().isEmpty()) { + cancelQuery_ = other.cancelQuery_; + bitField0_ |= 0x00000002; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + longRunningSql_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + cancelQuery_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object longRunningSql_ = ""; + /** + * + * + *
    +     * Long running query.
    +     * 
    + * + * string long_running_sql = 1; + * + * @return The longRunningSql. + */ + public java.lang.String getLongRunningSql() { + java.lang.Object ref = longRunningSql_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + longRunningSql_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * Long running query.
    +     * 
    + * + * string long_running_sql = 1; + * + * @return The bytes for longRunningSql. + */ + public com.google.protobuf.ByteString getLongRunningSqlBytes() { + java.lang.Object ref = longRunningSql_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + longRunningSql_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * Long running query.
    +     * 
    + * + * string long_running_sql = 1; + * + * @param value The longRunningSql to set. + * @return This builder for chaining. + */ + public Builder setLongRunningSql(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + longRunningSql_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +     * Long running query.
    +     * 
    + * + * string long_running_sql = 1; + * + * @return This builder for chaining. + */ + public Builder clearLongRunningSql() { + longRunningSql_ = getDefaultInstance().getLongRunningSql(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
    +     * Long running query.
    +     * 
    + * + * string long_running_sql = 1; + * + * @param value The bytes for longRunningSql to set. + * @return This builder for chaining. + */ + public Builder setLongRunningSqlBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + longRunningSql_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object cancelQuery_ = ""; + /** + * + * + *
    +     * Format of the cancel query for the cloud database dialect.
    +     * 
    + * + * string cancel_query = 2; + * + * @return The cancelQuery. + */ + public java.lang.String getCancelQuery() { + java.lang.Object ref = cancelQuery_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + cancelQuery_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
    +     * Format of the cancel query for the cloud database dialect.
    +     * 
    + * + * string cancel_query = 2; + * + * @return The bytes for cancelQuery. + */ + public com.google.protobuf.ByteString getCancelQueryBytes() { + java.lang.Object ref = cancelQuery_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + cancelQuery_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
    +     * Format of the cancel query for the cloud database dialect.
    +     * 
    + * + * string cancel_query = 2; + * + * @param value The cancelQuery to set. + * @return This builder for chaining. + */ + public Builder setCancelQuery(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + cancelQuery_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * Format of the cancel query for the cloud database dialect.
    +     * 
    + * + * string cancel_query = 2; + * + * @return This builder for chaining. + */ + public Builder clearCancelQuery() { + cancelQuery_ = getDefaultInstance().getCancelQuery(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * + * + *
    +     * Format of the cancel query for the cloud database dialect.
    +     * 
    + * + * string cancel_query = 2; + * + * @param value The bytes for cancelQuery to set. + * @return This builder for chaining. + */ + public Builder setCancelQueryBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + cancelQuery_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.executor.v1.QueryCancellationAction) + } + + // @@protoc_insertion_point(class_scope:google.spanner.executor.v1.QueryCancellationAction) + private static final com.google.spanner.executor.v1.QueryCancellationAction DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.executor.v1.QueryCancellationAction(); + } + + public static com.google.spanner.executor.v1.QueryCancellationAction getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public QueryCancellationAction parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } 
catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationAction getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryCancellationActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryCancellationActionOrBuilder.java new file mode 100644 index 00000000000..bb0b958bfba --- /dev/null +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryCancellationActionOrBuilder.java @@ -0,0 +1,76 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/spanner/executor/v1/cloud_executor.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.executor.v1; + +public interface QueryCancellationActionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.executor.v1.QueryCancellationAction) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Long running query.
    +   * 
    + * + * string long_running_sql = 1; + * + * @return The longRunningSql. + */ + java.lang.String getLongRunningSql(); + /** + * + * + *
    +   * Long running query.
    +   * 
    + * + * string long_running_sql = 1; + * + * @return The bytes for longRunningSql. + */ + com.google.protobuf.ByteString getLongRunningSqlBytes(); + + /** + * + * + *
    +   * Format of the cancel query for the cloud database dialect.
    +   * 
    + * + * string cancel_query = 2; + * + * @return The cancelQuery. + */ + java.lang.String getCancelQuery(); + /** + * + * + *
    +   * Format of the cancel query for the cloud database dialect.
    +   * 
    + * + * string cancel_query = 2; + * + * @return The bytes for cancelQuery. + */ + com.google.protobuf.ByteString getCancelQueryBytes(); +} diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryResult.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryResult.java index b20d591fb79..57b043b1aea 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryResult.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryResult.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryResultOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryResultOrBuilder.java index 4338c70ab83..aee841900ae 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryResultOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/QueryResultOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface QueryResultOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadAction.java index c0d4265cd14..8d4d14438bc 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadAction.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadAction.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadActionOrBuilder.java index c228ba3459b..f47465d3822 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadActionOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadActionOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface ReadActionOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadResult.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadResult.java index efd6f80ccf5..61210f94f80 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadResult.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadResult.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadResultOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadResultOrBuilder.java index 202d6e2d447..39d0266629b 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadResultOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ReadResultOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface ReadResultOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/RestoreCloudDatabaseAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/RestoreCloudDatabaseAction.java index 45d89b31afe..5a75808ba13 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/RestoreCloudDatabaseAction.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/RestoreCloudDatabaseAction.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/RestoreCloudDatabaseActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/RestoreCloudDatabaseActionOrBuilder.java index 3bf8f841233..c9c8435dedf 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/RestoreCloudDatabaseActionOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/RestoreCloudDatabaseActionOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface RestoreCloudDatabaseActionOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SessionPoolOptions.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SessionPoolOptions.java index e6bd8b20564..248ae7f4e7c 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SessionPoolOptions.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SessionPoolOptions.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SessionPoolOptionsOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SessionPoolOptionsOrBuilder.java index e2981a6421a..1e113fb29bd 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SessionPoolOptionsOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SessionPoolOptionsOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface SessionPoolOptionsOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAction.java index 8a9025e7eb3..e0898144da3 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAction.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAction.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** @@ -91,6 +91,7 @@ public enum ActionCase GENERATE_DB_PARTITIONS_QUERY(43), EXECUTE_PARTITION(44), EXECUTE_CHANGE_STREAM_QUERY(50), + QUERY_CANCELLATION(51), ACTION_NOT_SET(0); private final int value; @@ -141,6 +142,8 @@ public static ActionCase forNumber(int value) { return EXECUTE_PARTITION; case 50: return EXECUTE_CHANGE_STREAM_QUERY; + case 51: + return QUERY_CANCELLATION; case 0: return ACTION_NOT_SET; default: @@ -1105,6 +1108,58 @@ public com.google.spanner.executor.v1.ExecuteChangeStreamQuery getExecuteChangeS return com.google.spanner.executor.v1.ExecuteChangeStreamQuery.getDefaultInstance(); } + public static final int QUERY_CANCELLATION_FIELD_NUMBER = 51; + /** + * + * + *
    +   * Query cancellation action for testing the cancellation of a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + * + * @return Whether the queryCancellation field is set. + */ + @java.lang.Override + public boolean hasQueryCancellation() { + return actionCase_ == 51; + } + /** + * + * + *
    +   * Query cancellation action for testing the cancellation of a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + * + * @return The queryCancellation. + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationAction getQueryCancellation() { + if (actionCase_ == 51) { + return (com.google.spanner.executor.v1.QueryCancellationAction) action_; + } + return com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance(); + } + /** + * + * + *
    +   * Query cancellation action for testing the cancellation of a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationActionOrBuilder + getQueryCancellationOrBuilder() { + if (actionCase_ == 51) { + return (com.google.spanner.executor.v1.QueryCancellationAction) action_; + } + return com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -1175,6 +1230,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (actionCase_ == 50) { output.writeMessage(50, (com.google.spanner.executor.v1.ExecuteChangeStreamQuery) action_); } + if (actionCase_ == 51) { + output.writeMessage(51, (com.google.spanner.executor.v1.QueryCancellationAction) action_); + } getUnknownFields().writeTo(output); } @@ -1270,6 +1328,11 @@ public int getSerializedSize() { com.google.protobuf.CodedOutputStream.computeMessageSize( 50, (com.google.spanner.executor.v1.ExecuteChangeStreamQuery) action_); } + if (actionCase_ == 51) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 51, (com.google.spanner.executor.v1.QueryCancellationAction) action_); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -1344,6 +1407,9 @@ public boolean equals(final java.lang.Object obj) { if (!getExecuteChangeStreamQuery().equals(other.getExecuteChangeStreamQuery())) return false; break; + case 51: + if (!getQueryCancellation().equals(other.getQueryCancellation())) return false; + break; case 0: default: } @@ -1429,6 +1495,10 @@ public int hashCode() { hash = (37 * hash) + EXECUTE_CHANGE_STREAM_QUERY_FIELD_NUMBER; hash = (53 * hash) + getExecuteChangeStreamQuery().hashCode(); break; + case 51: + hash = (37 * hash) + QUERY_CANCELLATION_FIELD_NUMBER; + hash = (53 * hash) + getQueryCancellation().hashCode(); + break; case 0: default: } @@ -1636,6 +1706,9 @@ public Builder clear() { if 
(executeChangeStreamQueryBuilder_ != null) { executeChangeStreamQueryBuilder_.clear(); } + if (queryCancellationBuilder_ != null) { + queryCancellationBuilder_.clear(); + } actionCase_ = 0; action_ = null; return this; @@ -1738,6 +1811,9 @@ private void buildPartialOneofs(com.google.spanner.executor.v1.SpannerAction res if (actionCase_ == 50 && executeChangeStreamQueryBuilder_ != null) { result.action_ = executeChangeStreamQueryBuilder_.build(); } + if (actionCase_ == 51 && queryCancellationBuilder_ != null) { + result.action_ = queryCancellationBuilder_.build(); + } } @java.lang.Override @@ -1874,6 +1950,11 @@ public Builder mergeFrom(com.google.spanner.executor.v1.SpannerAction other) { mergeExecuteChangeStreamQuery(other.getExecuteChangeStreamQuery()); break; } + case QUERY_CANCELLATION: + { + mergeQueryCancellation(other.getQueryCancellation()); + break; + } case ACTION_NOT_SET: { break; @@ -2018,6 +2099,13 @@ public Builder mergeFrom( actionCase_ = 50; break; } // case 402 + case 410: + { + input.readMessage( + getQueryCancellationFieldBuilder().getBuilder(), extensionRegistry); + actionCase_ = 51; + break; + } // case 410 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -5801,6 +5889,220 @@ public Builder clearExecuteChangeStreamQuery() { return executeChangeStreamQueryBuilder_; } + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.executor.v1.QueryCancellationAction, + com.google.spanner.executor.v1.QueryCancellationAction.Builder, + com.google.spanner.executor.v1.QueryCancellationActionOrBuilder> + queryCancellationBuilder_; + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + * + * @return Whether the queryCancellation field is set. + */ + @java.lang.Override + public boolean hasQueryCancellation() { + return actionCase_ == 51; + } + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + * + * @return The queryCancellation. + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationAction getQueryCancellation() { + if (queryCancellationBuilder_ == null) { + if (actionCase_ == 51) { + return (com.google.spanner.executor.v1.QueryCancellationAction) action_; + } + return com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance(); + } else { + if (actionCase_ == 51) { + return queryCancellationBuilder_.getMessage(); + } + return com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance(); + } + } + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + public Builder setQueryCancellation( + com.google.spanner.executor.v1.QueryCancellationAction value) { + if (queryCancellationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + action_ = value; + onChanged(); + } else { + queryCancellationBuilder_.setMessage(value); + } + actionCase_ = 51; + return this; + } + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + public Builder setQueryCancellation( + com.google.spanner.executor.v1.QueryCancellationAction.Builder builderForValue) { + if (queryCancellationBuilder_ == null) { + action_ = builderForValue.build(); + onChanged(); + } else { + queryCancellationBuilder_.setMessage(builderForValue.build()); + } + actionCase_ = 51; + return this; + } + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + public Builder mergeQueryCancellation( + com.google.spanner.executor.v1.QueryCancellationAction value) { + if (queryCancellationBuilder_ == null) { + if (actionCase_ == 51 + && action_ + != com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance()) { + action_ = + com.google.spanner.executor.v1.QueryCancellationAction.newBuilder( + (com.google.spanner.executor.v1.QueryCancellationAction) action_) + .mergeFrom(value) + .buildPartial(); + } else { + action_ = value; + } + onChanged(); + } else { + if (actionCase_ == 51) { + queryCancellationBuilder_.mergeFrom(value); + } else { + queryCancellationBuilder_.setMessage(value); + } + } + actionCase_ = 51; + return this; + } + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + public Builder clearQueryCancellation() { + if (queryCancellationBuilder_ == null) { + if (actionCase_ == 51) { + actionCase_ = 0; + action_ = null; + onChanged(); + } + } else { + if (actionCase_ == 51) { + actionCase_ = 0; + action_ = null; + } + queryCancellationBuilder_.clear(); + } + return this; + } + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + public com.google.spanner.executor.v1.QueryCancellationAction.Builder + getQueryCancellationBuilder() { + return getQueryCancellationFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + @java.lang.Override + public com.google.spanner.executor.v1.QueryCancellationActionOrBuilder + getQueryCancellationOrBuilder() { + if ((actionCase_ == 51) && (queryCancellationBuilder_ != null)) { + return queryCancellationBuilder_.getMessageOrBuilder(); + } else { + if (actionCase_ == 51) { + return (com.google.spanner.executor.v1.QueryCancellationAction) action_; + } + return com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance(); + } + } + /** + * + * + *
    +     * Query cancellation action for testing the cancellation of a query.
    +     * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.executor.v1.QueryCancellationAction, + com.google.spanner.executor.v1.QueryCancellationAction.Builder, + com.google.spanner.executor.v1.QueryCancellationActionOrBuilder> + getQueryCancellationFieldBuilder() { + if (queryCancellationBuilder_ == null) { + if (!(actionCase_ == 51)) { + action_ = com.google.spanner.executor.v1.QueryCancellationAction.getDefaultInstance(); + } + queryCancellationBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.executor.v1.QueryCancellationAction, + com.google.spanner.executor.v1.QueryCancellationAction.Builder, + com.google.spanner.executor.v1.QueryCancellationActionOrBuilder>( + (com.google.spanner.executor.v1.QueryCancellationAction) action_, + getParentForChildren(), + isClean()); + action_ = null; + } + actionCase_ = 51; + onChanged(); + return queryCancellationBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOrBuilder.java index 202abcfc871..8ac7916ade2 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface SpannerActionOrBuilder @@ -669,5 +669,40 @@ public interface SpannerActionOrBuilder com.google.spanner.executor.v1.ExecuteChangeStreamQueryOrBuilder getExecuteChangeStreamQueryOrBuilder(); + /** + * + * + *
    +   * Query cancellation action for testing the cancellation of a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + * + * @return Whether the queryCancellation field is set. + */ + boolean hasQueryCancellation(); + /** + * + * + *
    +   * Query cancellation action for testing the cancellation of a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + * + * @return The queryCancellation. + */ + com.google.spanner.executor.v1.QueryCancellationAction getQueryCancellation(); + /** + * + * + *
    +   * Query cancellation action for testing the cancellation of a query.
    +   * 
    + * + * .google.spanner.executor.v1.QueryCancellationAction query_cancellation = 51; + */ + com.google.spanner.executor.v1.QueryCancellationActionOrBuilder getQueryCancellationOrBuilder(); + com.google.spanner.executor.v1.SpannerAction.ActionCase getActionCase(); } diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOutcome.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOutcome.java index a54ec1a8505..f55f6e922e2 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOutcome.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOutcome.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOutcomeOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOutcomeOrBuilder.java index a0b28a558d7..d4cf3918de7 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOutcomeOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerActionOutcomeOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface SpannerActionOutcomeOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionRequest.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionRequest.java index 8fd6341e275..60eea387403 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionRequest.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionRequestOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionRequestOrBuilder.java index 64be7d0b15a..48ba7dbab00 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionRequestOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface SpannerAsyncActionRequestOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionResponse.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionResponse.java index 3a7093bc52b..3a76404589b 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionResponse.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionResponse.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionResponseOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionResponseOrBuilder.java index 4bd8caf3b49..fca831e6295 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionResponseOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerAsyncActionResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface SpannerAsyncActionResponseOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerOptions.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerOptions.java index 0f8d6b2635e..9bcea7611a1 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerOptions.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerOptions.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerOptionsOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerOptionsOrBuilder.java index c336ae68be3..6aefeac531c 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerOptionsOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/SpannerOptionsOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface SpannerOptionsOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartBatchTransactionAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartBatchTransactionAction.java index c674de6de4b..ac9b62c5dab 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartBatchTransactionAction.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartBatchTransactionAction.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartBatchTransactionActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartBatchTransactionActionOrBuilder.java index b7450cf5537..ef9cea72b07 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartBatchTransactionActionOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartBatchTransactionActionOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface StartBatchTransactionActionOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartTransactionAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartTransactionAction.java index e4326119096..0664bee8b70 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartTransactionAction.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartTransactionAction.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartTransactionActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartTransactionActionOrBuilder.java index b01fff9ec34..0bcb5020dcc 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartTransactionActionOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/StartTransactionActionOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface StartTransactionActionOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TableMetadata.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TableMetadata.java index 903de924f72..82128f674e7 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TableMetadata.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TableMetadata.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TableMetadataOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TableMetadataOrBuilder.java index ba304ba374e..1958b5a0e72 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TableMetadataOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TableMetadataOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface TableMetadataOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TransactionExecutionOptions.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TransactionExecutionOptions.java index 04d1c7a45a3..8514fd9354d 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TransactionExecutionOptions.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TransactionExecutionOptions.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TransactionExecutionOptionsOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TransactionExecutionOptionsOrBuilder.java index 1b8758a0327..ff88f0ebd17 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TransactionExecutionOptionsOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/TransactionExecutionOptionsOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface TransactionExecutionOptionsOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudBackupAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudBackupAction.java index 777c77107bb..4d44c06675b 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudBackupAction.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudBackupAction.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudBackupActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudBackupActionOrBuilder.java index 0e6e242e727..2ac6a13defe 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudBackupActionOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudBackupActionOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface UpdateCloudBackupActionOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseAction.java index 35709b65a76..bf8b7c43e77 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseAction.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseAction.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseActionOrBuilder.java index 5899ffb07ec..f9c79e1798b 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseActionOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseActionOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface UpdateCloudDatabaseActionOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseDdlAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseDdlAction.java index d2c80490134..b004f1268e3 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseDdlAction.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseDdlAction.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseDdlActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseDdlActionOrBuilder.java index 598fe63271d..4c76af44df5 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseDdlActionOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudDatabaseDdlActionOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface UpdateCloudDatabaseDdlActionOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudInstanceAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudInstanceAction.java index ca7ef5038db..f03f155faf0 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudInstanceAction.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudInstanceAction.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudInstanceActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudInstanceActionOrBuilder.java index 42dc9048fa4..18c44a301c7 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudInstanceActionOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateCloudInstanceActionOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface UpdateCloudInstanceActionOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateUserInstanceConfigAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateUserInstanceConfigAction.java index 4324d826c1e..5ee5522c645 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateUserInstanceConfigAction.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateUserInstanceConfigAction.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateUserInstanceConfigActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateUserInstanceConfigActionOrBuilder.java index 796fd0b748b..2abc3cc3404 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateUserInstanceConfigActionOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/UpdateUserInstanceConfigActionOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface UpdateUserInstanceConfigActionOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/Value.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/Value.java index 129b745a967..35e5b2c248b 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/Value.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/Value.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueList.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueList.java index 7428d74ed42..f6ffdcb4754 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueList.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueList.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueListOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueListOrBuilder.java index 17e7a989d78..2175ae24462 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueListOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueListOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface ValueListOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueOrBuilder.java index 2929fb0b2ff..c2ed511edc7 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/ValueOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface ValueOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/WriteMutationsAction.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/WriteMutationsAction.java index 83cbde72742..a2069057848 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/WriteMutationsAction.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/WriteMutationsAction.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; /** diff --git a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/WriteMutationsActionOrBuilder.java b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/WriteMutationsActionOrBuilder.java index 752952b34ff..593821a9ace 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/WriteMutationsActionOrBuilder.java +++ b/proto-google-cloud-spanner-executor-v1/src/main/java/com/google/spanner/executor/v1/WriteMutationsActionOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/executor/v1/cloud_executor.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.executor.v1; public interface WriteMutationsActionOrBuilder diff --git a/proto-google-cloud-spanner-executor-v1/src/main/proto/google/spanner/executor/v1/cloud_executor.proto b/proto-google-cloud-spanner-executor-v1/src/main/proto/google/spanner/executor/v1/cloud_executor.proto index 3ad36e3ee30..05d662a5a1d 100644 --- a/proto-google-cloud-spanner-executor-v1/src/main/proto/google/spanner/executor/v1/cloud_executor.proto +++ b/proto-google-cloud-spanner-executor-v1/src/main/proto/google/spanner/executor/v1/cloud_executor.proto @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -132,6 +132,9 @@ message SpannerAction { // Action to execute change stream query. ExecuteChangeStreamQuery execute_change_stream_query = 50; + + // Query cancellation action for testing the cancellation of a query. + QueryCancellationAction query_cancellation = 51; } } @@ -1048,6 +1051,16 @@ message GetOperationAction { string operation = 1; } +// Query cancellation action defines the long running query and the cancel query +// format depening on the Cloud database dialect. +message QueryCancellationAction { + // Long running query. + string long_running_sql = 1; + + // Format of the cancel query for the cloud database dialect. + string cancel_query = 2; +} + // Action that cancels an operation. message CancelOperationAction { // The name of the operation resource to be cancelled. 
diff --git a/proto-google-cloud-spanner-v1/clirr-ignored-differences.xml b/proto-google-cloud-spanner-v1/clirr-ignored-differences.xml index f9ff2435339..89fd05b2e3c 100644 --- a/proto-google-cloud-spanner-v1/clirr-ignored-differences.xml +++ b/proto-google-cloud-spanner-v1/clirr-ignored-differences.xml @@ -16,4 +16,66 @@ com/google/spanner/v1/*OrBuilder boolean has*(*) + + + + 7006 + com/google/spanner/v1/** + * getDefaultInstanceForType() + ** + + + 7006 + com/google/spanner/v1/** + * addRepeatedField(*) + ** + + + 7006 + com/google/spanner/v1/** + * clear() + ** + + + 7006 + com/google/spanner/v1/** + * clearField(*) + ** + + + 7006 + com/google/spanner/v1/** + * clearOneof(*) + ** + + + 7006 + com/google/spanner/v1/** + * clone() + ** + + + 7006 + com/google/spanner/v1/** + * mergeUnknownFields(*) + ** + + + 7006 + com/google/spanner/v1/** + * setField(*) + ** + + + 7006 + com/google/spanner/v1/** + * setRepeatedField(*) + ** + + + 7006 + com/google/spanner/v1/** + * setUnknownFields(*) + ** + diff --git a/proto-google-cloud-spanner-v1/pom.xml b/proto-google-cloud-spanner-v1/pom.xml index a03f206a4b7..44d3d2da725 100644 --- a/proto-google-cloud-spanner-v1/pom.xml +++ b/proto-google-cloud-spanner-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-spanner-v1 - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT proto-google-cloud-spanner-v1 PROTO library for proto-google-cloud-spanner-v1 com.google.cloud google-cloud-spanner-parent - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsRequest.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsRequest.java index 260a0255fb3..e84447e47b9 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsRequest.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsRequest.java @@ -16,7 +16,7 @@ // Generated by the 
protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsRequestOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsRequestOrBuilder.java index 64ec026ddf4..5ddcc68663c 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsRequestOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface BatchCreateSessionsRequestOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsResponse.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsResponse.java index f20c62b0129..97f934e25e9 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsResponse.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsResponse.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsResponseOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsResponseOrBuilder.java index 2ec1795cb41..1e0cf80de51 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsResponseOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchCreateSessionsResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface BatchCreateSessionsResponseOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteRequest.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteRequest.java index 2228fc01908..ffd38c3ab76 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteRequest.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteRequestOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteRequestOrBuilder.java index 5731c28f786..c1be4b01531 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteRequestOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface BatchWriteRequestOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteResponse.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteResponse.java index 6b02776db03..428b4abe0ff 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteResponse.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteResponse.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteResponseOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteResponseOrBuilder.java index de59b2b962b..8c51666429f 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteResponseOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BatchWriteResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface BatchWriteResponseOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BeginTransactionRequest.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BeginTransactionRequest.java index 2064b7dcc41..59ef89bb161 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BeginTransactionRequest.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BeginTransactionRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** @@ -238,6 +238,74 @@ public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder( : requestOptions_; } + public static final int MUTATION_KEY_FIELD_NUMBER = 4; + private com.google.spanner.v1.Mutation mutationKey_; + /** + * + * + *
    +   * Optional. Required for read-write transactions on a multiplexed session
    +   * that commit mutations but do not perform any reads or queries. Clients
    +   * should randomly select one of the mutations from the mutation set and send
    +   * it as a part of this request.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the mutationKey field is set. + */ + @java.lang.Override + public boolean hasMutationKey() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * + * + *
    +   * Optional. Required for read-write transactions on a multiplexed session
    +   * that commit mutations but do not perform any reads or queries. Clients
    +   * should randomly select one of the mutations from the mutation set and send
    +   * it as a part of this request.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mutationKey. + */ + @java.lang.Override + public com.google.spanner.v1.Mutation getMutationKey() { + return mutationKey_ == null + ? com.google.spanner.v1.Mutation.getDefaultInstance() + : mutationKey_; + } + /** + * + * + *
    +   * Optional. Required for read-write transactions on a multiplexed session
    +   * that commit mutations but do not perform any reads or queries. Clients
    +   * should randomly select one of the mutations from the mutation set and send
    +   * it as a part of this request.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.MutationOrBuilder getMutationKeyOrBuilder() { + return mutationKey_ == null + ? com.google.spanner.v1.Mutation.getDefaultInstance() + : mutationKey_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -261,6 +329,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (((bitField0_ & 0x00000002) != 0)) { output.writeMessage(3, getRequestOptions()); } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(4, getMutationKey()); + } getUnknownFields().writeTo(output); } @@ -279,6 +350,9 @@ public int getSerializedSize() { if (((bitField0_ & 0x00000002) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getRequestOptions()); } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getMutationKey()); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -304,6 +378,10 @@ public boolean equals(final java.lang.Object obj) { if (hasRequestOptions()) { if (!getRequestOptions().equals(other.getRequestOptions())) return false; } + if (hasMutationKey() != other.hasMutationKey()) return false; + if (hasMutationKey()) { + if (!getMutationKey().equals(other.getMutationKey())) return false; + } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -325,6 +403,10 @@ public int hashCode() { hash = (37 * hash) + REQUEST_OPTIONS_FIELD_NUMBER; hash = (53 * hash) + getRequestOptions().hashCode(); } + if (hasMutationKey()) { + hash = (37 * hash) + MUTATION_KEY_FIELD_NUMBER; + hash = (53 * hash) + getMutationKey().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -468,6 +550,7 @@ private void maybeForceBuilderInitialization() { if 
(com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { getOptionsFieldBuilder(); getRequestOptionsFieldBuilder(); + getMutationKeyFieldBuilder(); } } @@ -486,6 +569,11 @@ public Builder clear() { requestOptionsBuilder_.dispose(); requestOptionsBuilder_ = null; } + mutationKey_ = null; + if (mutationKeyBuilder_ != null) { + mutationKeyBuilder_.dispose(); + mutationKeyBuilder_ = null; + } return this; } @@ -535,6 +623,11 @@ private void buildPartial0(com.google.spanner.v1.BeginTransactionRequest result) requestOptionsBuilder_ == null ? requestOptions_ : requestOptionsBuilder_.build(); to_bitField0_ |= 0x00000002; } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.mutationKey_ = + mutationKeyBuilder_ == null ? mutationKey_ : mutationKeyBuilder_.build(); + to_bitField0_ |= 0x00000004; + } result.bitField0_ |= to_bitField0_; } @@ -594,6 +687,9 @@ public Builder mergeFrom(com.google.spanner.v1.BeginTransactionRequest other) { if (other.hasRequestOptions()) { mergeRequestOptions(other.getRequestOptions()); } + if (other.hasMutationKey()) { + mergeMutationKey(other.getMutationKey()); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -638,6 +734,12 @@ public Builder mergeFrom( bitField0_ |= 0x00000004; break; } // case 26 + case 34: + { + input.readMessage(getMutationKeyFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 34 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -1197,6 +1299,245 @@ public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder( return requestOptionsBuilder_; } + private com.google.spanner.v1.Mutation mutationKey_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.Mutation, + com.google.spanner.v1.Mutation.Builder, + com.google.spanner.v1.MutationOrBuilder> + mutationKeyBuilder_; + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but do not perform any reads or queries. Clients
    +     * should randomly select one of the mutations from the mutation set and send
    +     * it as a part of this request.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the mutationKey field is set. + */ + public boolean hasMutationKey() { + return ((bitField0_ & 0x00000008) != 0); + } + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but do not perform any reads or queries. Clients
    +     * should randomly select one of the mutations from the mutation set and send
    +     * it as a part of this request.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mutationKey. + */ + public com.google.spanner.v1.Mutation getMutationKey() { + if (mutationKeyBuilder_ == null) { + return mutationKey_ == null + ? com.google.spanner.v1.Mutation.getDefaultInstance() + : mutationKey_; + } else { + return mutationKeyBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but do not perform any reads or queries. Clients
    +     * should randomly select one of the mutations from the mutation set and send
    +     * it as a part of this request.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setMutationKey(com.google.spanner.v1.Mutation value) { + if (mutationKeyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + mutationKey_ = value; + } else { + mutationKeyBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but do not perform any reads or queries. Clients
    +     * should randomly select one of the mutations from the mutation set and send
    +     * it as a part of this request.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setMutationKey(com.google.spanner.v1.Mutation.Builder builderForValue) { + if (mutationKeyBuilder_ == null) { + mutationKey_ = builderForValue.build(); + } else { + mutationKeyBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but do not perform any reads or queries. Clients
    +     * should randomly select one of the mutations from the mutation set and send
    +     * it as a part of this request.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeMutationKey(com.google.spanner.v1.Mutation value) { + if (mutationKeyBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && mutationKey_ != null + && mutationKey_ != com.google.spanner.v1.Mutation.getDefaultInstance()) { + getMutationKeyBuilder().mergeFrom(value); + } else { + mutationKey_ = value; + } + } else { + mutationKeyBuilder_.mergeFrom(value); + } + if (mutationKey_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but do not perform any reads or queries. Clients
    +     * should randomly select one of the mutations from the mutation set and send
    +     * it as a part of this request.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearMutationKey() { + bitField0_ = (bitField0_ & ~0x00000008); + mutationKey_ = null; + if (mutationKeyBuilder_ != null) { + mutationKeyBuilder_.dispose(); + mutationKeyBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but do not perform any reads or queries. Clients
    +     * should randomly select one of the mutations from the mutation set and send
    +     * it as a part of this request.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.Mutation.Builder getMutationKeyBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getMutationKeyFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but do not perform any reads or queries. Clients
    +     * should randomly select one of the mutations from the mutation set and send
    +     * it as a part of this request.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MutationOrBuilder getMutationKeyOrBuilder() { + if (mutationKeyBuilder_ != null) { + return mutationKeyBuilder_.getMessageOrBuilder(); + } else { + return mutationKey_ == null + ? com.google.spanner.v1.Mutation.getDefaultInstance() + : mutationKey_; + } + } + /** + * + * + *
    +     * Optional. Required for read-write transactions on a multiplexed session
    +     * that commit mutations but do not perform any reads or queries. Clients
    +     * should randomly select one of the mutations from the mutation set and send
    +     * it as a part of this request.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.Mutation, + com.google.spanner.v1.Mutation.Builder, + com.google.spanner.v1.MutationOrBuilder> + getMutationKeyFieldBuilder() { + if (mutationKeyBuilder_ == null) { + mutationKeyBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.Mutation, + com.google.spanner.v1.Mutation.Builder, + com.google.spanner.v1.MutationOrBuilder>( + getMutationKey(), getParentForChildren(), isClean()); + mutationKey_ = null; + } + return mutationKeyBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BeginTransactionRequestOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BeginTransactionRequestOrBuilder.java index d02389313b4..0d67610b513 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BeginTransactionRequestOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/BeginTransactionRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface BeginTransactionRequestOrBuilder @@ -140,4 +140,57 @@ public interface BeginTransactionRequestOrBuilder * .google.spanner.v1.RequestOptions request_options = 3; */ com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder(); + + /** + * + * + *
    +   * Optional. Required for read-write transactions on a multiplexed session
    +   * that commit mutations but do not perform any reads or queries. Clients
    +   * should randomly select one of the mutations from the mutation set and send
    +   * it as a part of this request.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the mutationKey field is set. + */ + boolean hasMutationKey(); + /** + * + * + *
    +   * Optional. Required for read-write transactions on a multiplexed session
    +   * that commit mutations but do not perform any reads or queries. Clients
    +   * should randomly select one of the mutations from the mutation set and send
    +   * it as a part of this request.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mutationKey. + */ + com.google.spanner.v1.Mutation getMutationKey(); + /** + * + * + *
    +   * Optional. Required for read-write transactions on a multiplexed session
    +   * that commit mutations but do not perform any reads or queries. Clients
    +   * should randomly select one of the mutations from the mutation set and send
    +   * it as a part of this request.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * .google.spanner.v1.Mutation mutation_key = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.MutationOrBuilder getMutationKeyOrBuilder(); } diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitRequest.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitRequest.java index dae1f71dd92..d026425bfbb 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitRequest.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** @@ -495,6 +495,78 @@ public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder( : requestOptions_; } + public static final int PRECOMMIT_TOKEN_FIELD_NUMBER = 9; + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + /** + * + * + *
    +   * Optional. If the read-write transaction was executed on a multiplexed
    +   * session, the precommit token with the highest sequence number received in
    +   * this transaction attempt, should be included here. Failing to do so will
    +   * result in a FailedPrecondition error.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + @java.lang.Override + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * + * + *
    +   * Optional. If the read-write transaction was executed on a multiplexed
    +   * session, the precommit token with the highest sequence number received in
    +   * this transaction attempt, should be included here. Failing to do so will
    +   * result in a FailedPrecondition error.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + /** + * + * + *
    +   * Optional. If the read-write transaction was executed on a multiplexed
    +   * session, the precommit token with the highest sequence number received in
    +   * this transaction attempt, should be included here. Failing to do so will
    +   * result in a FailedPrecondition error.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -530,6 +602,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(8, getMaxCommitDelay()); } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(9, getPrecommitToken()); + } getUnknownFields().writeTo(output); } @@ -564,6 +639,9 @@ public int getSerializedSize() { if (((bitField0_ & 0x00000001) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getMaxCommitDelay()); } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, getPrecommitToken()); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -590,6 +668,10 @@ public boolean equals(final java.lang.Object obj) { if (hasRequestOptions()) { if (!getRequestOptions().equals(other.getRequestOptions())) return false; } + if (hasPrecommitToken() != other.hasPrecommitToken()) return false; + if (hasPrecommitToken()) { + if (!getPrecommitToken().equals(other.getPrecommitToken())) return false; + } if (!getTransactionCase().equals(other.getTransactionCase())) return false; switch (transactionCase_) { case 2: @@ -628,6 +710,10 @@ public int hashCode() { hash = (37 * hash) + REQUEST_OPTIONS_FIELD_NUMBER; hash = (53 * hash) + getRequestOptions().hashCode(); } + if (hasPrecommitToken()) { + hash = (37 * hash) + PRECOMMIT_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPrecommitToken().hashCode(); + } switch (transactionCase_) { case 2: hash = (37 
* hash) + TRANSACTION_ID_FIELD_NUMBER; @@ -782,6 +868,7 @@ private void maybeForceBuilderInitialization() { getMutationsFieldBuilder(); getMaxCommitDelayFieldBuilder(); getRequestOptionsFieldBuilder(); + getPrecommitTokenFieldBuilder(); } } @@ -811,6 +898,11 @@ public Builder clear() { requestOptionsBuilder_.dispose(); requestOptionsBuilder_ = null; } + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } transactionCase_ = 0; transaction_ = null; return this; @@ -879,6 +971,11 @@ private void buildPartial0(com.google.spanner.v1.CommitRequest result) { requestOptionsBuilder_ == null ? requestOptions_ : requestOptionsBuilder_.build(); to_bitField0_ |= 0x00000002; } + if (((from_bitField0_ & 0x00000080) != 0)) { + result.precommitToken_ = + precommitTokenBuilder_ == null ? precommitToken_ : precommitTokenBuilder_.build(); + to_bitField0_ |= 0x00000004; + } result.bitField0_ |= to_bitField0_; } @@ -976,6 +1073,9 @@ public Builder mergeFrom(com.google.spanner.v1.CommitRequest other) { if (other.hasRequestOptions()) { mergeRequestOptions(other.getRequestOptions()); } + if (other.hasPrecommitToken()) { + mergePrecommitToken(other.getPrecommitToken()); + } switch (other.getTransactionCase()) { case TRANSACTION_ID: { @@ -1067,6 +1167,12 @@ public Builder mergeFrom( bitField0_ |= 0x00000020; break; } // case 66 + case 74: + { + input.readMessage(getPrecommitTokenFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000080; + break; + } // case 74 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -2434,6 +2540,259 @@ public com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder( return requestOptionsBuilder_; } + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + 
com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + precommitTokenBuilder_; + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, the precommit token with the highest sequence number received in
    +     * this transaction attempt, should be included here. Failing to do so will
    +     * result in a FailedPrecondition error.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000080) != 0); + } + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, the precommit token with the highest sequence number received in
    +     * this transaction attempt, should be included here. Failing to do so will
    +     * result in a FailedPrecondition error.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + if (precommitTokenBuilder_ == null) { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } else { + return precommitTokenBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, the precommit token with the highest sequence number received in
    +     * this transaction attempt, should be included here. Failing to do so will
    +     * result in a FailedPrecondition error.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken(com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + precommitToken_ = value; + } else { + precommitTokenBuilder_.setMessage(value); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, the precommit token with the highest sequence number received in
    +     * this transaction attempt, should be included here. Failing to do so will
    +     * result in a FailedPrecondition error.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder builderForValue) { + if (precommitTokenBuilder_ == null) { + precommitToken_ = builderForValue.build(); + } else { + precommitTokenBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000080; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, the precommit token with the highest sequence number received in
    +     * this transaction attempt, should be included here. Failing to do so will
    +     * result in a FailedPrecondition error.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergePrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (((bitField0_ & 0x00000080) != 0) + && precommitToken_ != null + && precommitToken_ + != com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance()) { + getPrecommitTokenBuilder().mergeFrom(value); + } else { + precommitToken_ = value; + } + } else { + precommitTokenBuilder_.mergeFrom(value); + } + if (precommitToken_ != null) { + bitField0_ |= 0x00000080; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, the precommit token with the highest sequence number received in
    +     * this transaction attempt, should be included here. Failing to do so will
    +     * result in a FailedPrecondition error.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearPrecommitToken() { + bitField0_ = (bitField0_ & ~0x00000080); + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, the precommit token with the highest sequence number received in
    +     * this transaction attempt, should be included here. Failing to do so will
    +     * result in a FailedPrecondition error.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder + getPrecommitTokenBuilder() { + bitField0_ |= 0x00000080; + onChanged(); + return getPrecommitTokenFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, the precommit token with the highest sequence number received in
    +     * this transaction attempt, should be included here. Failing to do so will
    +     * result in a FailedPrecondition error.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + if (precommitTokenBuilder_ != null) { + return precommitTokenBuilder_.getMessageOrBuilder(); + } else { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + } + /** + * + * + *
    +     * Optional. If the read-write transaction was executed on a multiplexed
    +     * session, the precommit token with the highest sequence number received in
    +     * this transaction attempt, should be included here. Failing to do so will
    +     * result in a FailedPrecondition error.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + getPrecommitTokenFieldBuilder() { + if (precommitTokenBuilder_ == null) { + precommitTokenBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder>( + getPrecommitToken(), getParentForChildren(), isClean()); + precommitToken_ = null; + } + return precommitTokenBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitRequestOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitRequestOrBuilder.java index 9f1ec3703f6..3a9703e11c0 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitRequestOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface CommitRequestOrBuilder @@ -298,5 +298,61 @@ public interface CommitRequestOrBuilder */ com.google.spanner.v1.RequestOptionsOrBuilder getRequestOptionsOrBuilder(); + /** + * + * + *
    +   * Optional. If the read-write transaction was executed on a multiplexed
    +   * session, the precommit token with the highest sequence number received in
    +   * this transaction attempt, should be included here. Failing to do so will
    +   * result in a FailedPrecondition error.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + boolean hasPrecommitToken(); + /** + * + * + *
    +   * Optional. If the read-write transaction was executed on a multiplexed
    +   * session, the precommit token with the highest sequence number received in
    +   * this transaction attempt, should be included here. Failing to do so will
    +   * result in a FailedPrecondition error.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken(); + /** + * + * + *
    +   * Optional. If the read-write transaction was executed on a multiplexed
    +   * session, the precommit token with the highest sequence number received in
    +   * this transaction attempt, should be included here. Failing to do so will
    +   * result in a FailedPrecondition error.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 9 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder getPrecommitTokenOrBuilder(); + com.google.spanner.v1.CommitRequest.TransactionCase getTransactionCase(); } diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponse.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponse.java index 7246fde5be6..af19b73626d 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponse.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponse.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/commit_response.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** @@ -631,6 +631,52 @@ public com.google.spanner.v1.CommitResponse.CommitStats getDefaultInstanceForTyp } private int bitField0_; + private int multiplexedSessionRetryCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object multiplexedSessionRetry_; + + public enum MultiplexedSessionRetryCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + PRECOMMIT_TOKEN(4), + MULTIPLEXEDSESSIONRETRY_NOT_SET(0); + private final int value; + + private MultiplexedSessionRetryCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static MultiplexedSessionRetryCase valueOf(int value) { + return forNumber(value); + } + + public static MultiplexedSessionRetryCase forNumber(int value) { + switch (value) { + case 4: + return PRECOMMIT_TOKEN; + case 0: + return MULTIPLEXEDSESSIONRETRY_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public MultiplexedSessionRetryCase getMultiplexedSessionRetryCase() { + return MultiplexedSessionRetryCase.forNumber(multiplexedSessionRetryCase_); + } + public static final int COMMIT_TIMESTAMP_FIELD_NUMBER = 1; private com.google.protobuf.Timestamp commitTimestamp_; /** @@ -737,6 +783,61 @@ public com.google.spanner.v1.CommitResponse.CommitStatsOrBuilder getCommitStatsO : commitStats_; } + public static final int PRECOMMIT_TOKEN_FIELD_NUMBER = 4; + /** + * + * + *
    +   * If specified, transaction has not committed yet.
    +   * Clients must retry the commit with the new precommit token.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + * + * @return Whether the precommitToken field is set. + */ + @java.lang.Override + public boolean hasPrecommitToken() { + return multiplexedSessionRetryCase_ == 4; + } + /** + * + * + *
    +   * If specified, transaction has not committed yet.
    +   * Clients must retry the commit with the new precommit token.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + if (multiplexedSessionRetryCase_ == 4) { + return (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_; + } + return com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance(); + } + /** + * + * + *
    +   * If specified, transaction has not committed yet.
    +   * Clients must retry the commit with the new precommit token.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + if (multiplexedSessionRetryCase_ == 4) { + return (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_; + } + return com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -757,6 +858,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (((bitField0_ & 0x00000002) != 0)) { output.writeMessage(2, getCommitStats()); } + if (multiplexedSessionRetryCase_ == 4) { + output.writeMessage( + 4, (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_); + } getUnknownFields().writeTo(output); } @@ -772,6 +877,11 @@ public int getSerializedSize() { if (((bitField0_ & 0x00000002) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getCommitStats()); } + if (multiplexedSessionRetryCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -795,6 +905,15 @@ public boolean equals(final java.lang.Object obj) { if (hasCommitStats()) { if (!getCommitStats().equals(other.getCommitStats())) return false; } + if (!getMultiplexedSessionRetryCase().equals(other.getMultiplexedSessionRetryCase())) + return false; + switch (multiplexedSessionRetryCase_) { + case 4: + if (!getPrecommitToken().equals(other.getPrecommitToken())) return false; + break; + case 0: + default: + } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -814,6 +933,14 @@ public int hashCode() { hash = (37 * hash) + COMMIT_STATS_FIELD_NUMBER; hash = (53 * hash) + 
getCommitStats().hashCode(); } + switch (multiplexedSessionRetryCase_) { + case 4: + hash = (37 * hash) + PRECOMMIT_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPrecommitToken().hashCode(); + break; + case 0: + default: + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -972,6 +1099,11 @@ public Builder clear() { commitStatsBuilder_.dispose(); commitStatsBuilder_ = null; } + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.clear(); + } + multiplexedSessionRetryCase_ = 0; + multiplexedSessionRetry_ = null; return this; } @@ -1001,6 +1133,7 @@ public com.google.spanner.v1.CommitResponse buildPartial() { if (bitField0_ != 0) { buildPartial0(result); } + buildPartialOneofs(result); onBuilt(); return result; } @@ -1021,6 +1154,14 @@ private void buildPartial0(com.google.spanner.v1.CommitResponse result) { result.bitField0_ |= to_bitField0_; } + private void buildPartialOneofs(com.google.spanner.v1.CommitResponse result) { + result.multiplexedSessionRetryCase_ = multiplexedSessionRetryCase_; + result.multiplexedSessionRetry_ = this.multiplexedSessionRetry_; + if (multiplexedSessionRetryCase_ == 4 && precommitTokenBuilder_ != null) { + result.multiplexedSessionRetry_ = precommitTokenBuilder_.build(); + } + } + @java.lang.Override public Builder clone() { return super.clone(); @@ -1072,6 +1213,17 @@ public Builder mergeFrom(com.google.spanner.v1.CommitResponse other) { if (other.hasCommitStats()) { mergeCommitStats(other.getCommitStats()); } + switch (other.getMultiplexedSessionRetryCase()) { + case PRECOMMIT_TOKEN: + { + mergePrecommitToken(other.getPrecommitToken()); + break; + } + case MULTIPLEXEDSESSIONRETRY_NOT_SET: + { + break; + } + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -1110,6 +1262,12 @@ public Builder mergeFrom( bitField0_ |= 0x00000002; break; } // case 18 + case 34: + { + input.readMessage(getPrecommitTokenFieldBuilder().getBuilder(), extensionRegistry); + 
multiplexedSessionRetryCase_ = 4; + break; + } // case 34 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -1127,6 +1285,20 @@ public Builder mergeFrom( return this; } + private int multiplexedSessionRetryCase_ = 0; + private java.lang.Object multiplexedSessionRetry_; + + public MultiplexedSessionRetryCase getMultiplexedSessionRetryCase() { + return MultiplexedSessionRetryCase.forNumber(multiplexedSessionRetryCase_); + } + + public Builder clearMultiplexedSessionRetry() { + multiplexedSessionRetryCase_ = 0; + multiplexedSessionRetry_ = null; + onChanged(); + return this; + } + private int bitField0_; private com.google.protobuf.Timestamp commitTimestamp_; @@ -1519,6 +1691,230 @@ public com.google.spanner.v1.CommitResponse.CommitStatsOrBuilder getCommitStatsO return commitStatsBuilder_; } + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + precommitTokenBuilder_; + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * Clients must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + * + * @return Whether the precommitToken field is set. + */ + @java.lang.Override + public boolean hasPrecommitToken() { + return multiplexedSessionRetryCase_ == 4; + } + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * Clients must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + if (precommitTokenBuilder_ == null) { + if (multiplexedSessionRetryCase_ == 4) { + return (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_; + } + return com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance(); + } else { + if (multiplexedSessionRetryCase_ == 4) { + return precommitTokenBuilder_.getMessage(); + } + return com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance(); + } + } + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * Clients must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + public Builder setPrecommitToken(com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + multiplexedSessionRetry_ = value; + onChanged(); + } else { + precommitTokenBuilder_.setMessage(value); + } + multiplexedSessionRetryCase_ = 4; + return this; + } + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * Clients must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + public Builder setPrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder builderForValue) { + if (precommitTokenBuilder_ == null) { + multiplexedSessionRetry_ = builderForValue.build(); + onChanged(); + } else { + precommitTokenBuilder_.setMessage(builderForValue.build()); + } + multiplexedSessionRetryCase_ = 4; + return this; + } + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * Clients must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + public Builder mergePrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (multiplexedSessionRetryCase_ == 4 + && multiplexedSessionRetry_ + != com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance()) { + multiplexedSessionRetry_ = + com.google.spanner.v1.MultiplexedSessionPrecommitToken.newBuilder( + (com.google.spanner.v1.MultiplexedSessionPrecommitToken) + multiplexedSessionRetry_) + .mergeFrom(value) + .buildPartial(); + } else { + multiplexedSessionRetry_ = value; + } + onChanged(); + } else { + if (multiplexedSessionRetryCase_ == 4) { + precommitTokenBuilder_.mergeFrom(value); + } else { + precommitTokenBuilder_.setMessage(value); + } + } + multiplexedSessionRetryCase_ = 4; + return this; + } + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * Clients must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + public Builder clearPrecommitToken() { + if (precommitTokenBuilder_ == null) { + if (multiplexedSessionRetryCase_ == 4) { + multiplexedSessionRetryCase_ = 0; + multiplexedSessionRetry_ = null; + onChanged(); + } + } else { + if (multiplexedSessionRetryCase_ == 4) { + multiplexedSessionRetryCase_ = 0; + multiplexedSessionRetry_ = null; + } + precommitTokenBuilder_.clear(); + } + return this; + } + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * Clients must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder + getPrecommitTokenBuilder() { + return getPrecommitTokenFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * Clients must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + if ((multiplexedSessionRetryCase_ == 4) && (precommitTokenBuilder_ != null)) { + return precommitTokenBuilder_.getMessageOrBuilder(); + } else { + if (multiplexedSessionRetryCase_ == 4) { + return (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_; + } + return com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance(); + } + } + /** + * + * + *
    +     * If specified, transaction has not committed yet.
    +     * Clients must retry the commit with the new precommit token.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + getPrecommitTokenFieldBuilder() { + if (precommitTokenBuilder_ == null) { + if (!(multiplexedSessionRetryCase_ == 4)) { + multiplexedSessionRetry_ = + com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance(); + } + precommitTokenBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder>( + (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_, + getParentForChildren(), + isClean()); + multiplexedSessionRetry_ = null; + } + multiplexedSessionRetryCase_ = 4; + onChanged(); + return precommitTokenBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseOrBuilder.java index 75d7b782e63..9a4e8b04a0a 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/commit_response.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface CommitResponseOrBuilder @@ -99,4 +99,44 @@ public interface CommitResponseOrBuilder * .google.spanner.v1.CommitResponse.CommitStats commit_stats = 2; */ com.google.spanner.v1.CommitResponse.CommitStatsOrBuilder getCommitStatsOrBuilder(); + + /** + * + * + *
    +   * If specified, transaction has not committed yet.
    +   * Clients must retry the commit with the new precommit token.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + * + * @return Whether the precommitToken field is set. + */ + boolean hasPrecommitToken(); + /** + * + * + *
    +   * If specified, transaction has not committed yet.
    +   * Clients must retry the commit with the new precommit token.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + * + * @return The precommitToken. + */ + com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken(); + /** + * + * + *
    +   * If specified, transaction has not committed yet.
    +   * Clients must retry the commit with the new precommit token.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; + */ + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder getPrecommitTokenOrBuilder(); + + com.google.spanner.v1.CommitResponse.MultiplexedSessionRetryCase getMultiplexedSessionRetryCase(); } diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseProto.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseProto.java index b21d8f2778e..bd550231631 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseProto.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseProto.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/commit_response.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public final class CommitResponseProto { @@ -47,22 +47,27 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { java.lang.String[] descriptorData = { "\n\'google/spanner/v1/commit_response.prot" + "o\022\021google.spanner.v1\032\037google/protobuf/ti" - + "mestamp.proto\"\262\001\n\016CommitResponse\0224\n\020comm" - + "it_timestamp\030\001 \001(\0132\032.google.protobuf.Tim" - + "estamp\022C\n\014commit_stats\030\002 \001(\0132-.google.sp" - + "anner.v1.CommitResponse.CommitStats\032%\n\013C" - + "ommitStats\022\026\n\016mutation_count\030\001 \001(\003B\266\001\n\025c" - + "om.google.spanner.v1B\023CommitResponseProt" - + "oP\001Z5cloud.google.com/go/spanner/apiv1/s" - + "pannerpb;spannerpb\252\002\027Google.Cloud.Spanne" - + "r.V1\312\002\027Google\\Cloud\\Spanner\\V1\352\002\032Google:" - + ":Cloud::Spanner::V1b\006proto3" + + "mestamp.proto\032#google/spanner/v1/transac" + + "tion.proto\"\235\002\n\016CommitResponse\0224\n\020commit_" + + "timestamp\030\001 \001(\0132\032.google.protobuf.Timest" + + 
"amp\022C\n\014commit_stats\030\002 \001(\0132-.google.spann" + + "er.v1.CommitResponse.CommitStats\022N\n\017prec" + + "ommit_token\030\004 \001(\01323.google.spanner.v1.Mu" + + "ltiplexedSessionPrecommitTokenH\000\032%\n\013Comm" + + "itStats\022\026\n\016mutation_count\030\001 \001(\003B\031\n\027Multi" + + "plexedSessionRetryB\266\001\n\025com.google.spanne" + + "r.v1B\023CommitResponseProtoP\001Z5cloud.googl" + + "e.com/go/spanner/apiv1/spannerpb;spanner" + + "pb\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cl" + + "oud\\Spanner\\V1\352\002\032Google::Cloud::Spanner:" + + ":V1b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { com.google.protobuf.TimestampProto.getDescriptor(), + com.google.spanner.v1.TransactionProto.getDescriptor(), }); internal_static_google_spanner_v1_CommitResponse_descriptor = getDescriptor().getMessageTypes().get(0); @@ -70,7 +75,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_v1_CommitResponse_descriptor, new java.lang.String[] { - "CommitTimestamp", "CommitStats", + "CommitTimestamp", "CommitStats", "PrecommitToken", "MultiplexedSessionRetry", }); internal_static_google_spanner_v1_CommitResponse_CommitStats_descriptor = internal_static_google_spanner_v1_CommitResponse_descriptor.getNestedTypes().get(0); @@ -81,6 +86,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "MutationCount", }); com.google.protobuf.TimestampProto.getDescriptor(); + com.google.spanner.v1.TransactionProto.getDescriptor(); } // @@protoc_insertion_point(outer_class_scope) diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CreateSessionRequest.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CreateSessionRequest.java 
index 021253ec534..4d2f7d2b2eb 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CreateSessionRequest.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CreateSessionRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CreateSessionRequestOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CreateSessionRequestOrBuilder.java index ab3ce67f2c9..81ac68f4993 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CreateSessionRequestOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CreateSessionRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface CreateSessionRequestOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DeleteSessionRequest.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DeleteSessionRequest.java index 315bbad7c79..f42ad674732 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DeleteSessionRequest.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DeleteSessionRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DeleteSessionRequestOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DeleteSessionRequestOrBuilder.java index d835370662c..204afe99f8d 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DeleteSessionRequestOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DeleteSessionRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface DeleteSessionRequestOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DirectedReadOptions.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DirectedReadOptions.java index 7caddb779a3..ed1d01e8916 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DirectedReadOptions.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DirectedReadOptions.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DirectedReadOptionsOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DirectedReadOptionsOrBuilder.java index 04a9f7fd4dd..73be3a1be20 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DirectedReadOptionsOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/DirectedReadOptionsOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface DirectedReadOptionsOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlRequest.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlRequest.java index b69e41096af..e829327651b 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlRequest.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlRequestOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlRequestOrBuilder.java index ed0b7960f0d..cdada2baa3f 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlRequestOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface ExecuteBatchDmlRequestOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlResponse.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlResponse.java index c1fbfa8f77e..8fed7627083 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlResponse.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlResponse.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** @@ -254,6 +254,81 @@ public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; } + public static final int PRECOMMIT_TOKEN_FIELD_NUMBER = 3; + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + @java.lang.Override + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -274,6 +349,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(2, getStatus()); } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getPrecommitToken()); + } getUnknownFields().writeTo(output); } @@ -289,6 +367,9 @@ public int getSerializedSize() { if (((bitField0_ & 0x00000001) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStatus()); } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getPrecommitToken()); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -310,6 +391,10 @@ public boolean equals(final java.lang.Object obj) { if (hasStatus()) { if (!getStatus().equals(other.getStatus())) return false; } + if (hasPrecommitToken() != other.hasPrecommitToken()) return false; + if (hasPrecommitToken()) { + if (!getPrecommitToken().equals(other.getPrecommitToken())) return false; + } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -329,6 +414,10 @@ public int hashCode() { hash = (37 * hash) + STATUS_FIELD_NUMBER; hash = (53 * hash) + getStatus().hashCode(); } + if (hasPrecommitToken()) { + hash = (37 * hash) + PRECOMMIT_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPrecommitToken().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -501,6 +590,7 @@ private void 
maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { getResultSetsFieldBuilder(); getStatusFieldBuilder(); + getPrecommitTokenFieldBuilder(); } } @@ -520,6 +610,11 @@ public Builder clear() { statusBuilder_.dispose(); statusBuilder_ = null; } + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } return this; } @@ -574,6 +669,11 @@ private void buildPartial0(com.google.spanner.v1.ExecuteBatchDmlResponse result) result.status_ = statusBuilder_ == null ? status_ : statusBuilder_.build(); to_bitField0_ |= 0x00000001; } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.precommitToken_ = + precommitTokenBuilder_ == null ? precommitToken_ : precommitTokenBuilder_.build(); + to_bitField0_ |= 0x00000002; + } result.bitField0_ |= to_bitField0_; } @@ -652,6 +752,9 @@ public Builder mergeFrom(com.google.spanner.v1.ExecuteBatchDmlResponse other) { if (other.hasStatus()) { mergeStatus(other.getStatus()); } + if (other.hasPrecommitToken()) { + mergePrecommitToken(other.getPrecommitToken()); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -696,6 +799,12 @@ public Builder mergeFrom( bitField0_ |= 0x00000002; break; } // case 18 + case 26: + { + input.readMessage(getPrecommitTokenFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -1392,6 +1501,268 @@ public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { return statusBuilder_; } + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + precommitTokenBuilder_; + 
/** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + if (precommitTokenBuilder_ == null) { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } else { + return precommitTokenBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken(com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + precommitToken_ = value; + } else { + precommitTokenBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder builderForValue) { + if (precommitTokenBuilder_ == null) { + precommitToken_ = builderForValue.build(); + } else { + precommitTokenBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergePrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && precommitToken_ != null + && precommitToken_ + != com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance()) { + getPrecommitTokenBuilder().mergeFrom(value); + } else { + precommitToken_ = value; + } + } else { + precommitTokenBuilder_.mergeFrom(value); + } + if (precommitToken_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearPrecommitToken() { + bitField0_ = (bitField0_ & ~0x00000004); + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder + getPrecommitTokenBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getPrecommitTokenFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + if (precommitTokenBuilder_ != null) { + return precommitTokenBuilder_.getMessageOrBuilder(); + } else { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + getPrecommitTokenFieldBuilder() { + if (precommitTokenBuilder_ == null) { + precommitTokenBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder>( + getPrecommitToken(), getParentForChildren(), isClean()); + precommitToken_ = null; + } + return precommitTokenBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlResponseOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlResponseOrBuilder.java index 2075f7a34e3..6b7dbfe17a8 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlResponseOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteBatchDmlResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface ExecuteBatchDmlResponseOrBuilder @@ -152,4 +152,63 @@ public interface ExecuteBatchDmlResponseOrBuilder * .google.rpc.Status status = 2; */ com.google.rpc.StatusOrBuilder getStatusOrBuilder(); + + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + boolean hasPrecommitToken(); + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken(); + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder getPrecommitTokenOrBuilder(); } diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteSqlRequest.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteSqlRequest.java index f2643e58d24..94ec19c8db9 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteSqlRequest.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteSqlRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** @@ -115,13 +115,37 @@ public enum QueryMode implements com.google.protobuf.ProtocolMessageEnum { * * *
    -     * This mode returns both the query plan and the execution statistics along
    -     * with the results.
    +     * This mode returns the query plan, overall execution statistics,
    +     * operator level execution statistics along with the results. This has a
    +     * performance overhead compared to the other modes. It is not recommended
    +     * to use this mode for production traffic.
          * 
    * * PROFILE = 2; */ PROFILE(2), + /** + * + * + *
    +     * This mode returns the overall (but not operator-level) execution
    +     * statistics along with the results.
    +     * 
    + * + * WITH_STATS = 3; + */ + WITH_STATS(3), + /** + * + * + *
    +     * This mode returns the query plan, overall (but not operator-level)
    +     * execution statistics along with the results.
    +     * 
    + * + * WITH_PLAN_AND_STATS = 4; + */ + WITH_PLAN_AND_STATS(4), UNRECOGNIZED(-1), ; @@ -150,13 +174,37 @@ public enum QueryMode implements com.google.protobuf.ProtocolMessageEnum { * * *
    -     * This mode returns both the query plan and the execution statistics along
    -     * with the results.
    +     * This mode returns the query plan, overall execution statistics,
    +     * operator level execution statistics along with the results. This has a
    +     * performance overhead compared to the other modes. It is not recommended
    +     * to use this mode for production traffic.
          * 
    * * PROFILE = 2; */ public static final int PROFILE_VALUE = 2; + /** + * + * + *
    +     * This mode returns the overall (but not operator-level) execution
    +     * statistics along with the results.
    +     * 
    + * + * WITH_STATS = 3; + */ + public static final int WITH_STATS_VALUE = 3; + /** + * + * + *
    +     * This mode returns the query plan, overall (but not operator-level)
    +     * execution statistics along with the results.
    +     * 
    + * + * WITH_PLAN_AND_STATS = 4; + */ + public static final int WITH_PLAN_AND_STATS_VALUE = 4; public final int getNumber() { if (this == UNRECOGNIZED) { @@ -188,6 +236,10 @@ public static QueryMode forNumber(int value) { return PLAN; case 2: return PROFILE; + case 3: + return WITH_STATS; + case 4: + return WITH_PLAN_AND_STATS; default: return null; } diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteSqlRequestOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteSqlRequestOrBuilder.java index c45941dfb03..de635dc34e2 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteSqlRequestOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ExecuteSqlRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface ExecuteSqlRequestOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GetSessionRequest.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GetSessionRequest.java index f9dd6b08689..13fbaad5063 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GetSessionRequest.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GetSessionRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GetSessionRequestOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GetSessionRequestOrBuilder.java index a428dc63829..d93462a3bfa 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GetSessionRequestOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/GetSessionRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface GetSessionRequestOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRange.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRange.java index 328a6910f1c..b751846bcff 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRange.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRange.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/keys.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRangeOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRangeOrBuilder.java index e149128fa46..af2d46ca473 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRangeOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeyRangeOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/keys.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface KeyRangeOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeySet.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeySet.java index 97631a35219..e4ce7254cc3 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeySet.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeySet.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/keys.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeySetOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeySetOrBuilder.java index b340e8ace72..8385fb6dddc 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeySetOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeySetOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/keys.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface KeySetOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeysProto.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeysProto.java index 3215c87d4b8..174004b9566 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeysProto.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/KeysProto.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/keys.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public final class KeysProto { diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsRequest.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsRequest.java index fb957816d27..c3204ba61cb 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsRequest.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsRequestOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsRequestOrBuilder.java index 367b98c243f..72db06ae585 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsRequestOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface ListSessionsRequestOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsResponse.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsResponse.java index 8d785f34789..bcc4cf3cc10 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsResponse.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsResponse.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsResponseOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsResponseOrBuilder.java index 6af91524494..54991625eac 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsResponseOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ListSessionsResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface ListSessionsResponseOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitToken.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitToken.java new file mode 100644 index 00000000000..406ebe14199 --- /dev/null +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitToken.java @@ -0,0 +1,648 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/v1/transaction.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.v1; + +/** + * + * + *
    + * When a read-write transaction is executed on a multiplexed session,
    + * this precommit token is sent back to the client
    + * as a part of the [Transaction] message in the BeginTransaction response and
    + * also as a part of the [ResultSet] and [PartialResultSet] responses.
    + * 
    + * + * Protobuf type {@code google.spanner.v1.MultiplexedSessionPrecommitToken} + */ +public final class MultiplexedSessionPrecommitToken extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.v1.MultiplexedSessionPrecommitToken) + MultiplexedSessionPrecommitTokenOrBuilder { + private static final long serialVersionUID = 0L; + // Use MultiplexedSessionPrecommitToken.newBuilder() to construct. + private MultiplexedSessionPrecommitToken( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private MultiplexedSessionPrecommitToken() { + precommitToken_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new MultiplexedSessionPrecommitToken(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.class, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder.class); + } + + public static final int PRECOMMIT_TOKEN_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString precommitToken_ = com.google.protobuf.ByteString.EMPTY; + /** + * + * + *
    +   * Opaque precommit token.
    +   * 
    + * + * bytes precommit_token = 1; + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPrecommitToken() { + return precommitToken_; + } + + public static final int SEQ_NUM_FIELD_NUMBER = 2; + private int seqNum_ = 0; + /** + * + * + *
    +   * An incrementing seq number is generated on every precommit token
    +   * that is returned. Clients should remember the precommit token with the
    +   * highest sequence number from the current transaction attempt.
    +   * 
    + * + * int32 seq_num = 2; + * + * @return The seqNum. + */ + @java.lang.Override + public int getSeqNum() { + return seqNum_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!precommitToken_.isEmpty()) { + output.writeBytes(1, precommitToken_); + } + if (seqNum_ != 0) { + output.writeInt32(2, seqNum_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!precommitToken_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, precommitToken_); + } + if (seqNum_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, seqNum_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.spanner.v1.MultiplexedSessionPrecommitToken)) { + return super.equals(obj); + } + com.google.spanner.v1.MultiplexedSessionPrecommitToken other = + (com.google.spanner.v1.MultiplexedSessionPrecommitToken) obj; + + if (!getPrecommitToken().equals(other.getPrecommitToken())) return false; + if (getSeqNum() != other.getSeqNum()) return false; + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PRECOMMIT_TOKEN_FIELD_NUMBER; + 
hash = (53 * hash) + getPrecommitToken().hashCode(); + hash = (37 * hash) + SEQ_NUM_FIELD_NUMBER; + hash = (53 * hash) + getSeqNum(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static 
com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.v1.MultiplexedSessionPrecommitToken prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
    +   * When a read-write transaction is executed on a multiplexed session,
    +   * this precommit token is sent back to the client
    +   * as a part of the [Transaction] message in the BeginTransaction response and
    +   * also as a part of the [ResultSet] and [PartialResultSet] responses.
    +   * 
    + * + * Protobuf type {@code google.spanner.v1.MultiplexedSessionPrecommitToken} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.v1.MultiplexedSessionPrecommitToken) + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.class, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder.class); + } + + // Construct using com.google.spanner.v1.MultiplexedSessionPrecommitToken.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + precommitToken_ = com.google.protobuf.ByteString.EMPTY; + seqNum_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.v1.TransactionProto + .internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_descriptor; + } + + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getDefaultInstanceForType() { + return com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken build() { + 
com.google.spanner.v1.MultiplexedSessionPrecommitToken result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken buildPartial() { + com.google.spanner.v1.MultiplexedSessionPrecommitToken result = + new com.google.spanner.v1.MultiplexedSessionPrecommitToken(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.spanner.v1.MultiplexedSessionPrecommitToken result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.precommitToken_ = precommitToken_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.seqNum_ = seqNum_; + } + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.spanner.v1.MultiplexedSessionPrecommitToken) { + return 
mergeFrom((com.google.spanner.v1.MultiplexedSessionPrecommitToken) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.spanner.v1.MultiplexedSessionPrecommitToken other) { + if (other == com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance()) + return this; + if (other.getPrecommitToken() != com.google.protobuf.ByteString.EMPTY) { + setPrecommitToken(other.getPrecommitToken()); + } + if (other.getSeqNum() != 0) { + setSeqNum(other.getSeqNum()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + precommitToken_ = input.readBytes(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 16: + { + seqNum_ = input.readInt32(); + bitField0_ |= 0x00000002; + break; + } // case 16 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.protobuf.ByteString precommitToken_ = com.google.protobuf.ByteString.EMPTY; + /** + * + * + *
    +     * Opaque precommit token.
    +     * 
    + * + * bytes precommit_token = 1; + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getPrecommitToken() { + return precommitToken_; + } + /** + * + * + *
    +     * Opaque precommit token.
    +     * 
    + * + * bytes precommit_token = 1; + * + * @param value The precommitToken to set. + * @return This builder for chaining. + */ + public Builder setPrecommitToken(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + precommitToken_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
    +     * Opaque precommit token.
    +     * 
    + * + * bytes precommit_token = 1; + * + * @return This builder for chaining. + */ + public Builder clearPrecommitToken() { + bitField0_ = (bitField0_ & ~0x00000001); + precommitToken_ = getDefaultInstance().getPrecommitToken(); + onChanged(); + return this; + } + + private int seqNum_; + /** + * + * + *
    +     * An incrementing seq number is generated on every precommit token
    +     * that is returned. Clients should remember the precommit token with the
    +     * highest sequence number from the current transaction attempt.
    +     * 
    + * + * int32 seq_num = 2; + * + * @return The seqNum. + */ + @java.lang.Override + public int getSeqNum() { + return seqNum_; + } + /** + * + * + *
    +     * An incrementing seq number is generated on every precommit token
    +     * that is returned. Clients should remember the precommit token with the
    +     * highest sequence number from the current transaction attempt.
    +     * 
    + * + * int32 seq_num = 2; + * + * @param value The seqNum to set. + * @return This builder for chaining. + */ + public Builder setSeqNum(int value) { + + seqNum_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +     * An incrementing seq number is generated on every precommit token
    +     * that is returned. Clients should remember the precommit token with the
    +     * highest sequence number from the current transaction attempt.
    +     * 
    + * + * int32 seq_num = 2; + * + * @return This builder for chaining. + */ + public Builder clearSeqNum() { + bitField0_ = (bitField0_ & ~0x00000002); + seqNum_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.v1.MultiplexedSessionPrecommitToken) + } + + // @@protoc_insertion_point(class_scope:google.spanner.v1.MultiplexedSessionPrecommitToken) + private static final com.google.spanner.v1.MultiplexedSessionPrecommitToken DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.spanner.v1.MultiplexedSessionPrecommitToken(); + } + + public static com.google.spanner.v1.MultiplexedSessionPrecommitToken getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public MultiplexedSessionPrecommitToken parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + 
} + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitTokenOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitTokenOrBuilder.java new file mode 100644 index 00000000000..1c8a9d74b98 --- /dev/null +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitTokenOrBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/v1/transaction.proto + +// Protobuf Java Version: 3.25.5 +package com.google.spanner.v1; + +public interface MultiplexedSessionPrecommitTokenOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.v1.MultiplexedSessionPrecommitToken) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Opaque precommit token.
    +   * 
    + * + * bytes precommit_token = 1; + * + * @return The precommitToken. + */ + com.google.protobuf.ByteString getPrecommitToken(); + + /** + * + * + *
    +   * An incrementing seq number is generated on every precommit token
    +   * that is returned. Clients should remember the precommit token with the
    +   * highest sequence number from the current transaction attempt.
    +   * 
    + * + * int32 seq_num = 2; + * + * @return The seqNum. + */ + int getSeqNum(); +} diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Mutation.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Mutation.java index 251c055bd7d..1c4c5c76b85 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Mutation.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Mutation.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/mutation.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MutationOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MutationOrBuilder.java index b8a912bc1b5..e2e89c432f9 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MutationOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MutationOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/mutation.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface MutationOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MutationProto.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MutationProto.java index 4e1755a9f42..9acd3b6542c 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MutationProto.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MutationProto.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/mutation.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public final class MutationProto { diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartialResultSet.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartialResultSet.java index 593d356384a..261b5a8e03b 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartialResultSet.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartialResultSet.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/result_set.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** @@ -653,6 +653,81 @@ public com.google.spanner.v1.ResultSetStatsOrBuilder getStatsOrBuilder() { return stats_ == null ? com.google.spanner.v1.ResultSetStats.getDefaultInstance() : stats_; } + public static final int PRECOMMIT_TOKEN_FIELD_NUMBER = 8; + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + @java.lang.Override + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -682,6 +757,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (((bitField0_ & 0x00000002) != 0)) { output.writeMessage(5, getStats()); } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(8, getPrecommitToken()); + } getUnknownFields().writeTo(output); } @@ -706,6 +784,9 @@ public int getSerializedSize() { if (((bitField0_ & 0x00000002) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getStats()); } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getPrecommitToken()); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -732,6 +813,10 @@ public boolean equals(final java.lang.Object obj) { if (hasStats()) { if (!getStats().equals(other.getStats())) return false; } + if (hasPrecommitToken() != other.hasPrecommitToken()) return false; + if (hasPrecommitToken()) { + if (!getPrecommitToken().equals(other.getPrecommitToken())) return false; + } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -759,6 +844,10 @@ public int hashCode() { hash = (37 * hash) + STATS_FIELD_NUMBER; hash = (53 * hash) + getStats().hashCode(); } + if (hasPrecommitToken()) { + hash = (37 * hash) + PRECOMMIT_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPrecommitToken().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -904,6 +993,7 @@ private void 
maybeForceBuilderInitialization() { getMetadataFieldBuilder(); getValuesFieldBuilder(); getStatsFieldBuilder(); + getPrecommitTokenFieldBuilder(); } } @@ -930,6 +1020,11 @@ public Builder clear() { statsBuilder_.dispose(); statsBuilder_ = null; } + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } return this; } @@ -994,6 +1089,11 @@ private void buildPartial0(com.google.spanner.v1.PartialResultSet result) { result.stats_ = statsBuilder_ == null ? stats_ : statsBuilder_.build(); to_bitField0_ |= 0x00000002; } + if (((from_bitField0_ & 0x00000020) != 0)) { + result.precommitToken_ = + precommitTokenBuilder_ == null ? precommitToken_ : precommitTokenBuilder_.build(); + to_bitField0_ |= 0x00000004; + } result.bitField0_ |= to_bitField0_; } @@ -1081,6 +1181,9 @@ public Builder mergeFrom(com.google.spanner.v1.PartialResultSet other) { if (other.hasStats()) { mergeStats(other.getStats()); } + if (other.hasPrecommitToken()) { + mergePrecommitToken(other.getPrecommitToken()); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -1143,6 +1246,12 @@ public Builder mergeFrom( bitField0_ |= 0x00000010; break; } // case 42 + case 66: + { + input.readMessage(getPrecommitTokenFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000020; + break; + } // case 66 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -3346,6 +3455,268 @@ public com.google.spanner.v1.ResultSetStatsOrBuilder getStatsOrBuilder() { return statsBuilder_; } + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + precommitTokenBuilder_; + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000020) != 0); + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + if (precommitTokenBuilder_ == null) { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } else { + return precommitTokenBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken(com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + precommitToken_ = value; + } else { + precommitTokenBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder builderForValue) { + if (precommitTokenBuilder_ == null) { + precommitToken_ = builderForValue.build(); + } else { + precommitTokenBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergePrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (((bitField0_ & 0x00000020) != 0) + && precommitToken_ != null + && precommitToken_ + != com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance()) { + getPrecommitTokenBuilder().mergeFrom(value); + } else { + precommitToken_ = value; + } + } else { + precommitTokenBuilder_.mergeFrom(value); + } + if (precommitToken_ != null) { + bitField0_ |= 0x00000020; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearPrecommitToken() { + bitField0_ = (bitField0_ & ~0x00000020); + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder + getPrecommitTokenBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return getPrecommitTokenFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + if (precommitTokenBuilder_ != null) { + return precommitTokenBuilder_.getMessageOrBuilder(); + } else { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + getPrecommitTokenFieldBuilder() { + if (precommitTokenBuilder_ == null) { + precommitTokenBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder>( + getPrecommitToken(), getParentForChildren(), isClean()); + precommitToken_ = null; + } + return precommitTokenBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartialResultSetOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartialResultSetOrBuilder.java index 1081754cc61..fdf1f461f68 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartialResultSetOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartialResultSetOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/result_set.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface PartialResultSetOrBuilder @@ -554,4 +554,63 @@ public interface PartialResultSetOrBuilder * .google.spanner.v1.ResultSetStats stats = 5; */ com.google.spanner.v1.ResultSetStatsOrBuilder getStatsOrBuilder(); + + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + boolean hasPrecommitToken(); + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken(); + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder getPrecommitTokenOrBuilder(); } diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Partition.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Partition.java index e77ed70d92e..6a15a91d006 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Partition.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Partition.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOptions.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOptions.java index e70ace69ad6..75d5ce4506d 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOptions.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOptions.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOptionsOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOptionsOrBuilder.java index 8b9e52b34ef..3deb5dac336 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOptionsOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOptionsOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface PartitionOptionsOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOrBuilder.java index 386d63f89b2..835fa411ea3 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface PartitionOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionQueryRequest.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionQueryRequest.java index 03893ce419f..bc440470227 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionQueryRequest.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionQueryRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionQueryRequestOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionQueryRequestOrBuilder.java index bf08cd72393..cb9db5d4d56 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionQueryRequestOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionQueryRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface PartitionQueryRequestOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionReadRequest.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionReadRequest.java index f75572fb574..99f475d105d 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionReadRequest.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionReadRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionReadRequestOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionReadRequestOrBuilder.java index df37ed20495..6a88d89cc4d 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionReadRequestOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionReadRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface PartitionReadRequestOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionResponse.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionResponse.java index 376fb8879b5..39ce19b3e14 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionResponse.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionResponse.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionResponseOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionResponseOrBuilder.java index d282a3a6fa6..ff1cc442d0a 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionResponseOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PartitionResponseOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface PartitionResponseOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PlanNode.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PlanNode.java index 1e6970a89c6..315436a7291 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PlanNode.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PlanNode.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/query_plan.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PlanNodeOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PlanNodeOrBuilder.java index b0e6f0fd751..443e4ac7079 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PlanNodeOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/PlanNodeOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/query_plan.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface PlanNodeOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlan.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlan.java index 613337aa1ad..fe542ddeed6 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlan.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlan.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/query_plan.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlanOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlanOrBuilder.java index c3595ec4d32..e1f67c4e1f0 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlanOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlanOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/query_plan.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface QueryPlanOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlanProto.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlanProto.java index 083ed3346fe..858e911a6b6 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlanProto.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/QueryPlanProto.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/query_plan.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public final class QueryPlanProto { diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ReadRequest.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ReadRequest.java index 24365c09f01..7c73c96dfbf 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ReadRequest.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ReadRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** @@ -46,6 +46,8 @@ private ReadRequest() { columns_ = com.google.protobuf.LazyStringArrayList.emptyList(); resumeToken_ = com.google.protobuf.ByteString.EMPTY; partitionToken_ = com.google.protobuf.ByteString.EMPTY; + orderBy_ = 0; + lockHint_ = 0; } @java.lang.Override @@ -69,6 +71,402 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { com.google.spanner.v1.ReadRequest.Builder.class); } + /** + * + * + *
    +   * An option to control the order in which rows are returned from a read.
    +   * 
    + * + * Protobuf enum {@code google.spanner.v1.ReadRequest.OrderBy} + */ + public enum OrderBy implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Default value.
    +     *
    +     * ORDER_BY_UNSPECIFIED is equivalent to ORDER_BY_PRIMARY_KEY.
    +     * 
    + * + * ORDER_BY_UNSPECIFIED = 0; + */ + ORDER_BY_UNSPECIFIED(0), + /** + * + * + *
    +     * Read rows are returned in primary key order.
    +     *
    +     * In the event that this option is used in conjunction with the
    +     * `partition_token` field, the API will return an `INVALID_ARGUMENT` error.
    +     * 
    + * + * ORDER_BY_PRIMARY_KEY = 1; + */ + ORDER_BY_PRIMARY_KEY(1), + /** + * + * + *
    +     * Read rows are returned in any order.
    +     * 
    + * + * ORDER_BY_NO_ORDER = 2; + */ + ORDER_BY_NO_ORDER(2), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
    +     * Default value.
    +     *
    +     * ORDER_BY_UNSPECIFIED is equivalent to ORDER_BY_PRIMARY_KEY.
    +     * 
    + * + * ORDER_BY_UNSPECIFIED = 0; + */ + public static final int ORDER_BY_UNSPECIFIED_VALUE = 0; + /** + * + * + *
    +     * Read rows are returned in primary key order.
    +     *
    +     * In the event that this option is used in conjunction with the
    +     * `partition_token` field, the API will return an `INVALID_ARGUMENT` error.
    +     * 
    + * + * ORDER_BY_PRIMARY_KEY = 1; + */ + public static final int ORDER_BY_PRIMARY_KEY_VALUE = 1; + /** + * + * + *
    +     * Read rows are returned in any order.
    +     * 
    + * + * ORDER_BY_NO_ORDER = 2; + */ + public static final int ORDER_BY_NO_ORDER_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OrderBy valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static OrderBy forNumber(int value) { + switch (value) { + case 0: + return ORDER_BY_UNSPECIFIED; + case 1: + return ORDER_BY_PRIMARY_KEY; + case 2: + return ORDER_BY_NO_ORDER; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public OrderBy findValueByNumber(int number) { + return OrderBy.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.ReadRequest.getDescriptor().getEnumTypes().get(0); + } + + private static final OrderBy[] VALUES = values(); + + public static OrderBy 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private OrderBy(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.ReadRequest.OrderBy) + } + + /** + * + * + *
    +   * A lock hint mechanism for reads done within a transaction.
    +   * 
    + * + * Protobuf enum {@code google.spanner.v1.ReadRequest.LockHint} + */ + public enum LockHint implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
    +     * Default value.
    +     *
    +     * LOCK_HINT_UNSPECIFIED is equivalent to LOCK_HINT_SHARED.
    +     * 
    + * + * LOCK_HINT_UNSPECIFIED = 0; + */ + LOCK_HINT_UNSPECIFIED(0), + /** + * + * + *
    +     * Acquire shared locks.
    +     *
    +     * By default when you perform a read as part of a read-write transaction,
    +     * Spanner acquires shared read locks, which allows other reads to still
    +     * access the data until your transaction is ready to commit. When your
    +     * transaction is committing and writes are being applied, the transaction
    +     * attempts to upgrade to an exclusive lock for any data you are writing.
    +     * For more information about locks, see [Lock
    +     * modes](https://cloud.google.com/spanner/docs/introspection/lock-statistics#explain-lock-modes).
    +     * 
    + * + * LOCK_HINT_SHARED = 1; + */ + LOCK_HINT_SHARED(1), + /** + * + * + *
    +     * Acquire exclusive locks.
    +     *
    +     * Requesting exclusive locks is beneficial if you observe high write
    +     * contention, which means you notice that multiple transactions are
    +     * concurrently trying to read and write to the same data, resulting in a
    +     * large number of aborts. This problem occurs when two transactions
    +     * initially acquire shared locks and then both try to upgrade to exclusive
    +     * locks at the same time. In this situation both transactions are waiting
    +     * for the other to give up their lock, resulting in a deadlocked situation.
    +     * Spanner is able to detect this occurring and force one of the
    +     * transactions to abort. However, this is a slow and expensive operation
    +     * and results in lower performance. In this case it makes sense to acquire
    +     * exclusive locks at the start of the transaction because then when
    +     * multiple transactions try to act on the same data, they automatically get
    +     * serialized. Each transaction waits its turn to acquire the lock and
    +     * avoids getting into deadlock situations.
    +     *
    +     * Because the exclusive lock hint is just a hint, it should not be
    +     * considered equivalent to a mutex. In other words, you should not use
    +     * Spanner exclusive locks as a mutual exclusion mechanism for the execution
    +     * of code outside of Spanner.
    +     *
    +     * **Note:** Request exclusive locks judiciously because they block others
    +     * from reading that data for the entire transaction, rather than just when
    +     * the writes are being performed. Unless you observe high write contention,
    +     * you should use the default of shared read locks so you don't prematurely
    +     * block other clients from reading the data that you're writing to.
    +     * 
    + * + * LOCK_HINT_EXCLUSIVE = 2; + */ + LOCK_HINT_EXCLUSIVE(2), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
    +     * Default value.
    +     *
    +     * LOCK_HINT_UNSPECIFIED is equivalent to LOCK_HINT_SHARED.
    +     * 
    + * + * LOCK_HINT_UNSPECIFIED = 0; + */ + public static final int LOCK_HINT_UNSPECIFIED_VALUE = 0; + /** + * + * + *
    +     * Acquire shared locks.
    +     *
    +     * By default when you perform a read as part of a read-write transaction,
    +     * Spanner acquires shared read locks, which allows other reads to still
    +     * access the data until your transaction is ready to commit. When your
    +     * transaction is committing and writes are being applied, the transaction
    +     * attempts to upgrade to an exclusive lock for any data you are writing.
    +     * For more information about locks, see [Lock
    +     * modes](https://cloud.google.com/spanner/docs/introspection/lock-statistics#explain-lock-modes).
    +     * 
    + * + * LOCK_HINT_SHARED = 1; + */ + public static final int LOCK_HINT_SHARED_VALUE = 1; + /** + * + * + *
    +     * Acquire exclusive locks.
    +     *
    +     * Requesting exclusive locks is beneficial if you observe high write
    +     * contention, which means you notice that multiple transactions are
    +     * concurrently trying to read and write to the same data, resulting in a
    +     * large number of aborts. This problem occurs when two transactions
    +     * initially acquire shared locks and then both try to upgrade to exclusive
    +     * locks at the same time. In this situation both transactions are waiting
    +     * for the other to give up their lock, resulting in a deadlocked situation.
    +     * Spanner is able to detect this occurring and force one of the
    +     * transactions to abort. However, this is a slow and expensive operation
    +     * and results in lower performance. In this case it makes sense to acquire
    +     * exclusive locks at the start of the transaction because then when
    +     * multiple transactions try to act on the same data, they automatically get
    +     * serialized. Each transaction waits its turn to acquire the lock and
    +     * avoids getting into deadlock situations.
    +     *
    +     * Because the exclusive lock hint is just a hint, it should not be
    +     * considered equivalent to a mutex. In other words, you should not use
    +     * Spanner exclusive locks as a mutual exclusion mechanism for the execution
    +     * of code outside of Spanner.
    +     *
    +     * **Note:** Request exclusive locks judiciously because they block others
    +     * from reading that data for the entire transaction, rather than just when
    +     * the writes are being performed. Unless you observe high write contention,
    +     * you should use the default of shared read locks so you don't prematurely
    +     * block other clients from reading the data that you're writing to.
    +     * 
    + * + * LOCK_HINT_EXCLUSIVE = 2; + */ + public static final int LOCK_HINT_EXCLUSIVE_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static LockHint valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static LockHint forNumber(int value) { + switch (value) { + case 0: + return LOCK_HINT_UNSPECIFIED; + case 1: + return LOCK_HINT_SHARED; + case 2: + return LOCK_HINT_EXCLUSIVE; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public LockHint findValueByNumber(int number) { + return LockHint.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.spanner.v1.ReadRequest.getDescriptor().getEnumTypes().get(1); + } + + private static final LockHint[] VALUES = values(); + + public static LockHint 
valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private LockHint(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.spanner.v1.ReadRequest.LockHint) + } + private int bitField0_; public static final int SESSION_FIELD_NUMBER = 1; @@ -635,6 +1033,98 @@ public boolean getDataBoostEnabled() { return dataBoostEnabled_; } + public static final int ORDER_BY_FIELD_NUMBER = 16; + private int orderBy_ = 0; + /** + * + * + *
    +   * Optional. Order for the returned rows.
    +   *
    +   * By default, Spanner will return result rows in primary key order except for
    +   * PartitionRead requests. For applications that do not require rows to be
    +   * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +   * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +   * resulting in lower latencies in certain cases (e.g. bulk point lookups).
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for orderBy. + */ + @java.lang.Override + public int getOrderByValue() { + return orderBy_; + } + /** + * + * + *
    +   * Optional. Order for the returned rows.
    +   *
    +   * By default, Spanner will return result rows in primary key order except for
    +   * PartitionRead requests. For applications that do not require rows to be
    +   * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +   * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +   * resulting in lower latencies in certain cases (e.g. bulk point lookups).
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The orderBy. + */ + @java.lang.Override + public com.google.spanner.v1.ReadRequest.OrderBy getOrderBy() { + com.google.spanner.v1.ReadRequest.OrderBy result = + com.google.spanner.v1.ReadRequest.OrderBy.forNumber(orderBy_); + return result == null ? com.google.spanner.v1.ReadRequest.OrderBy.UNRECOGNIZED : result; + } + + public static final int LOCK_HINT_FIELD_NUMBER = 17; + private int lockHint_ = 0; + /** + * + * + *
    +   * Optional. Lock Hint for the request, it can only be used with read-write
    +   * transactions.
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for lockHint. + */ + @java.lang.Override + public int getLockHintValue() { + return lockHint_; + } + /** + * + * + *
    +   * Optional. Lock Hint for the request, it can only be used with read-write
    +   * transactions.
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The lockHint. + */ + @java.lang.Override + public com.google.spanner.v1.ReadRequest.LockHint getLockHint() { + com.google.spanner.v1.ReadRequest.LockHint result = + com.google.spanner.v1.ReadRequest.LockHint.forNumber(lockHint_); + return result == null ? com.google.spanner.v1.ReadRequest.LockHint.UNRECOGNIZED : result; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -685,6 +1175,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (dataBoostEnabled_ != false) { output.writeBool(15, dataBoostEnabled_); } + if (orderBy_ != com.google.spanner.v1.ReadRequest.OrderBy.ORDER_BY_UNSPECIFIED.getNumber()) { + output.writeEnum(16, orderBy_); + } + if (lockHint_ != com.google.spanner.v1.ReadRequest.LockHint.LOCK_HINT_UNSPECIFIED.getNumber()) { + output.writeEnum(17, lockHint_); + } getUnknownFields().writeTo(output); } @@ -736,6 +1232,12 @@ public int getSerializedSize() { if (dataBoostEnabled_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(15, dataBoostEnabled_); } + if (orderBy_ != com.google.spanner.v1.ReadRequest.OrderBy.ORDER_BY_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(16, orderBy_); + } + if (lockHint_ != com.google.spanner.v1.ReadRequest.LockHint.LOCK_HINT_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(17, lockHint_); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -775,6 +1277,8 @@ public boolean equals(final java.lang.Object obj) { if (!getDirectedReadOptions().equals(other.getDirectedReadOptions())) return false; } if (getDataBoostEnabled() != other.getDataBoostEnabled()) return false; + if (orderBy_ != other.orderBy_) return false; + if (lockHint_ != other.lockHint_) return false; if 
(!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -820,6 +1324,10 @@ public int hashCode() { } hash = (37 * hash) + DATA_BOOST_ENABLED_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getDataBoostEnabled()); + hash = (37 * hash) + ORDER_BY_FIELD_NUMBER; + hash = (53 * hash) + orderBy_; + hash = (37 * hash) + LOCK_HINT_FIELD_NUMBER; + hash = (53 * hash) + lockHint_; hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -999,6 +1507,8 @@ public Builder clear() { directedReadOptionsBuilder_ = null; } dataBoostEnabled_ = false; + orderBy_ = 0; + lockHint_ = 0; return this; } @@ -1081,6 +1591,12 @@ private void buildPartial0(com.google.spanner.v1.ReadRequest result) { if (((from_bitField0_ & 0x00000800) != 0)) { result.dataBoostEnabled_ = dataBoostEnabled_; } + if (((from_bitField0_ & 0x00001000) != 0)) { + result.orderBy_ = orderBy_; + } + if (((from_bitField0_ & 0x00002000) != 0)) { + result.lockHint_ = lockHint_; + } result.bitField0_ |= to_bitField0_; } @@ -1178,6 +1694,12 @@ public Builder mergeFrom(com.google.spanner.v1.ReadRequest other) { if (other.getDataBoostEnabled() != false) { setDataBoostEnabled(other.getDataBoostEnabled()); } + if (other.orderBy_ != 0) { + setOrderByValue(other.getOrderByValue()); + } + if (other.lockHint_ != 0) { + setLockHintValue(other.getLockHintValue()); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -1278,6 +1800,18 @@ public Builder mergeFrom( bitField0_ |= 0x00000800; break; } // case 120 + case 128: + { + orderBy_ = input.readEnum(); + bitField0_ |= 0x00001000; + break; + } // case 128 + case 136: + { + lockHint_ = input.readEnum(); + bitField0_ |= 0x00002000; + break; + } // case 136 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -2974,6 +3508,241 @@ public Builder clearDataBoostEnabled() { return this; } + private int orderBy_ = 0; + /** + * + * + *
    +     * Optional. Order for the returned rows.
    +     *
    +     * By default, Spanner will return result rows in primary key order except for
    +     * PartitionRead requests. For applications that do not require rows to be
    +     * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +     * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +     * resulting in lower latencies in certain cases (e.g. bulk point lookups).
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for orderBy. + */ + @java.lang.Override + public int getOrderByValue() { + return orderBy_; + } + /** + * + * + *
    +     * Optional. Order for the returned rows.
    +     *
    +     * By default, Spanner will return result rows in primary key order except for
    +     * PartitionRead requests. For applications that do not require rows to be
    +     * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +     * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +     * resulting in lower latencies in certain cases (e.g. bulk point lookups).
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for orderBy to set. + * @return This builder for chaining. + */ + public Builder setOrderByValue(int value) { + orderBy_ = value; + bitField0_ |= 0x00001000; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Order for the returned rows.
    +     *
    +     * By default, Spanner will return result rows in primary key order except for
    +     * PartitionRead requests. For applications that do not require rows to be
    +     * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +     * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +     * resulting in lower latencies in certain cases (e.g. bulk point lookups).
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The orderBy. + */ + @java.lang.Override + public com.google.spanner.v1.ReadRequest.OrderBy getOrderBy() { + com.google.spanner.v1.ReadRequest.OrderBy result = + com.google.spanner.v1.ReadRequest.OrderBy.forNumber(orderBy_); + return result == null ? com.google.spanner.v1.ReadRequest.OrderBy.UNRECOGNIZED : result; + } + /** + * + * + *
    +     * Optional. Order for the returned rows.
    +     *
    +     * By default, Spanner will return result rows in primary key order except for
    +     * PartitionRead requests. For applications that do not require rows to be
    +     * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +     * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +     * resulting in lower latencies in certain cases (e.g. bulk point lookups).
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The orderBy to set. + * @return This builder for chaining. + */ + public Builder setOrderBy(com.google.spanner.v1.ReadRequest.OrderBy value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00001000; + orderBy_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Order for the returned rows.
    +     *
    +     * By default, Spanner will return result rows in primary key order except for
    +     * PartitionRead requests. For applications that do not require rows to be
    +     * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +     * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +     * resulting in lower latencies in certain cases (e.g. bulk point lookups).
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearOrderBy() { + bitField0_ = (bitField0_ & ~0x00001000); + orderBy_ = 0; + onChanged(); + return this; + } + + private int lockHint_ = 0; + /** + * + * + *
    +     * Optional. Lock Hint for the request, it can only be used with read-write
    +     * transactions.
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for lockHint. + */ + @java.lang.Override + public int getLockHintValue() { + return lockHint_; + } + /** + * + * + *
    +     * Optional. Lock Hint for the request, it can only be used with read-write
    +     * transactions.
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for lockHint to set. + * @return This builder for chaining. + */ + public Builder setLockHintValue(int value) { + lockHint_ = value; + bitField0_ |= 0x00002000; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Lock Hint for the request, it can only be used with read-write
    +     * transactions.
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The lockHint. + */ + @java.lang.Override + public com.google.spanner.v1.ReadRequest.LockHint getLockHint() { + com.google.spanner.v1.ReadRequest.LockHint result = + com.google.spanner.v1.ReadRequest.LockHint.forNumber(lockHint_); + return result == null ? com.google.spanner.v1.ReadRequest.LockHint.UNRECOGNIZED : result; + } + /** + * + * + *
    +     * Optional. Lock Hint for the request, it can only be used with read-write
    +     * transactions.
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The lockHint to set. + * @return This builder for chaining. + */ + public Builder setLockHint(com.google.spanner.v1.ReadRequest.LockHint value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00002000; + lockHint_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. Lock Hint for the request, it can only be used with read-write
    +     * transactions.
    +     * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearLockHint() { + bitField0_ = (bitField0_ & ~0x00002000); + lockHint_ = 0; + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ReadRequestOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ReadRequestOrBuilder.java index 16358d0d5e0..9e16532ff3c 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ReadRequestOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ReadRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface ReadRequestOrBuilder @@ -421,4 +421,76 @@ public interface ReadRequestOrBuilder * @return The dataBoostEnabled. */ boolean getDataBoostEnabled(); + + /** + * + * + *
    +   * Optional. Order for the returned rows.
    +   *
    +   * By default, Spanner will return result rows in primary key order except for
    +   * PartitionRead requests. For applications that do not require rows to be
    +   * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +   * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +   * resulting in lower latencies in certain cases (e.g. bulk point lookups).
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for orderBy. + */ + int getOrderByValue(); + /** + * + * + *
    +   * Optional. Order for the returned rows.
    +   *
    +   * By default, Spanner will return result rows in primary key order except for
    +   * PartitionRead requests. For applications that do not require rows to be
    +   * returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
    +   * `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
    +   * resulting in lower latencies in certain cases (e.g. bulk point lookups).
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.OrderBy order_by = 16 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The orderBy. + */ + com.google.spanner.v1.ReadRequest.OrderBy getOrderBy(); + + /** + * + * + *
    +   * Optional. Lock Hint for the request, it can only be used with read-write
    +   * transactions.
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for lockHint. + */ + int getLockHintValue(); + /** + * + * + *
    +   * Optional. Lock Hint for the request, it can only be used with read-write
    +   * transactions.
    +   * 
    + * + * + * .google.spanner.v1.ReadRequest.LockHint lock_hint = 17 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The lockHint. + */ + com.google.spanner.v1.ReadRequest.LockHint getLockHint(); } diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RequestOptions.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RequestOptions.java index b39a445b4fe..6df3f2b3fae 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RequestOptions.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RequestOptions.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RequestOptionsOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RequestOptionsOrBuilder.java index 4299be9da0f..97b5a3eda32 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RequestOptionsOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RequestOptionsOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface RequestOptionsOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSet.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSet.java index 8203c9a936c..42ee5817b9e 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSet.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSet.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/result_set.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** @@ -276,6 +276,81 @@ public com.google.spanner.v1.ResultSetStatsOrBuilder getStatsOrBuilder() { return stats_ == null ? com.google.spanner.v1.ResultSetStats.getDefaultInstance() : stats_; } + public static final int PRECOMMIT_TOKEN_FIELD_NUMBER = 5; + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + @java.lang.Override + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -299,6 +374,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (((bitField0_ & 0x00000002) != 0)) { output.writeMessage(3, getStats()); } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(5, getPrecommitToken()); + } getUnknownFields().writeTo(output); } @@ -317,6 +395,9 @@ public int getSerializedSize() { if (((bitField0_ & 0x00000002) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getStats()); } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getPrecommitToken()); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -341,6 +422,10 @@ public boolean equals(final java.lang.Object obj) { if (hasStats()) { if (!getStats().equals(other.getStats())) return false; } + if (hasPrecommitToken() != other.hasPrecommitToken()) return false; + if (hasPrecommitToken()) { + if (!getPrecommitToken().equals(other.getPrecommitToken())) return false; + } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -364,6 +449,10 @@ public int hashCode() { hash = (37 * hash) + STATS_FIELD_NUMBER; hash = (53 * hash) + getStats().hashCode(); } + if (hasPrecommitToken()) { + hash = (37 * hash) + PRECOMMIT_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPrecommitToken().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -506,6 +595,7 @@ private void 
maybeForceBuilderInitialization() { getMetadataFieldBuilder(); getRowsFieldBuilder(); getStatsFieldBuilder(); + getPrecommitTokenFieldBuilder(); } } @@ -530,6 +620,11 @@ public Builder clear() { statsBuilder_.dispose(); statsBuilder_ = null; } + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } return this; } @@ -587,6 +682,11 @@ private void buildPartial0(com.google.spanner.v1.ResultSet result) { result.stats_ = statsBuilder_ == null ? stats_ : statsBuilder_.build(); to_bitField0_ |= 0x00000002; } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.precommitToken_ = + precommitTokenBuilder_ == null ? precommitToken_ : precommitTokenBuilder_.build(); + to_bitField0_ |= 0x00000004; + } result.bitField0_ |= to_bitField0_; } @@ -668,6 +768,9 @@ public Builder mergeFrom(com.google.spanner.v1.ResultSet other) { if (other.hasStats()) { mergeStats(other.getStats()); } + if (other.hasPrecommitToken()) { + mergePrecommitToken(other.getPrecommitToken()); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -718,6 +821,12 @@ public Builder mergeFrom( bitField0_ |= 0x00000004; break; } // case 26 + case 42: + { + input.readMessage(getPrecommitTokenFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 42 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -1598,6 +1707,268 @@ public com.google.spanner.v1.ResultSetStatsOrBuilder getStatsOrBuilder() { return statsBuilder_; } + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + precommitTokenBuilder_; + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000008) != 0); + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + if (precommitTokenBuilder_ == null) { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } else { + return precommitTokenBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken(com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + precommitToken_ = value; + } else { + precommitTokenBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setPrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder builderForValue) { + if (precommitTokenBuilder_ == null) { + precommitToken_ = builderForValue.build(); + } else { + precommitTokenBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergePrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && precommitToken_ != null + && precommitToken_ + != com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance()) { + getPrecommitTokenBuilder().mergeFrom(value); + } else { + precommitToken_ = value; + } + } else { + precommitTokenBuilder_.mergeFrom(value); + } + if (precommitToken_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearPrecommitToken() { + bitField0_ = (bitField0_ & ~0x00000008); + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder + getPrecommitTokenBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getPrecommitTokenFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + if (precommitTokenBuilder_ != null) { + return precommitTokenBuilder_.getMessageOrBuilder(); + } else { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + } + /** + * + * + *
    +     * Optional. A precommit token will be included if the read-write transaction
    +     * is on a multiplexed session.
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the
    +     * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + getPrecommitTokenFieldBuilder() { + if (precommitTokenBuilder_ == null) { + precommitTokenBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder>( + getPrecommitToken(), getParentForChildren(), isClean()); + precommitToken_ = null; + } + return precommitTokenBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetMetadata.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetMetadata.java index c9b1cbe03e6..f87dcaf1d9d 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetMetadata.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetMetadata.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/result_set.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetMetadataOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetMetadataOrBuilder.java index 81ad710c41f..6680d86b3fb 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetMetadataOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetMetadataOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/result_set.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface ResultSetMetadataOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetOrBuilder.java index 89e26d5ce52..3ce8f7d3e2d 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/result_set.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface ResultSetOrBuilder @@ -190,4 +190,63 @@ public interface ResultSetOrBuilder * .google.spanner.v1.ResultSetStats stats = 3; */ com.google.spanner.v1.ResultSetStatsOrBuilder getStatsOrBuilder(); + + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the precommitToken field is set. + */ + boolean hasPrecommitToken(); + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The precommitToken. + */ + com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken(); + /** + * + * + *
    +   * Optional. A precommit token will be included if the read-write transaction
    +   * is on a multiplexed session.
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the
    +   * [Commit][google.spanner.v1.Spanner.Commit] request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 5 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder getPrecommitTokenOrBuilder(); } diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetProto.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetProto.java index 8bccc6dd24e..93b78d65669 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetProto.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetProto.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/result_set.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public final class ResultSetProto { @@ -54,39 +54,45 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { static { java.lang.String[] descriptorData = { "\n\"google/spanner/v1/result_set.proto\022\021go" - + "ogle.spanner.v1\032\034google/protobuf/struct." 
- + "proto\032\"google/spanner/v1/query_plan.prot" - + "o\032#google/spanner/v1/transaction.proto\032\034" - + "google/spanner/v1/type.proto\"\237\001\n\tResultS" - + "et\0226\n\010metadata\030\001 \001(\0132$.google.spanner.v1" - + ".ResultSetMetadata\022(\n\004rows\030\002 \003(\0132\032.googl" - + "e.protobuf.ListValue\0220\n\005stats\030\003 \001(\0132!.go" - + "ogle.spanner.v1.ResultSetStats\"\321\001\n\020Parti" - + "alResultSet\0226\n\010metadata\030\001 \001(\0132$.google.s" - + "panner.v1.ResultSetMetadata\022&\n\006values\030\002 " - + "\003(\0132\026.google.protobuf.Value\022\025\n\rchunked_v" - + "alue\030\003 \001(\010\022\024\n\014resume_token\030\004 \001(\014\0220\n\005stat" - + "s\030\005 \001(\0132!.google.spanner.v1.ResultSetSta" - + "ts\"\267\001\n\021ResultSetMetadata\022/\n\010row_type\030\001 \001" - + "(\0132\035.google.spanner.v1.StructType\0223\n\013tra" - + "nsaction\030\002 \001(\0132\036.google.spanner.v1.Trans" - + "action\022<\n\025undeclared_parameters\030\003 \001(\0132\035." - + "google.spanner.v1.StructType\"\271\001\n\016ResultS" - + "etStats\0220\n\nquery_plan\030\001 \001(\0132\034.google.spa" - + "nner.v1.QueryPlan\022,\n\013query_stats\030\002 \001(\0132\027" - + ".google.protobuf.Struct\022\031\n\017row_count_exa" - + "ct\030\003 \001(\003H\000\022\037\n\025row_count_lower_bound\030\004 \001(" - + "\003H\000B\013\n\trow_countB\264\001\n\025com.google.spanner." 
- + "v1B\016ResultSetProtoP\001Z5cloud.google.com/g" - + "o/spanner/apiv1/spannerpb;spannerpb\370\001\001\252\002" - + "\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\" - + "Spanner\\V1\352\002\032Google::Cloud::Spanner::V1b" - + "\006proto3" + + "ogle.spanner.v1\032\037google/api/field_behavi" + + "or.proto\032\034google/protobuf/struct.proto\032\"" + + "google/spanner/v1/query_plan.proto\032#goog" + + "le/spanner/v1/transaction.proto\032\034google/" + + "spanner/v1/type.proto\"\362\001\n\tResultSet\0226\n\010m" + + "etadata\030\001 \001(\0132$.google.spanner.v1.Result" + + "SetMetadata\022(\n\004rows\030\002 \003(\0132\032.google.proto" + + "buf.ListValue\0220\n\005stats\030\003 \001(\0132!.google.sp" + + "anner.v1.ResultSetStats\022Q\n\017precommit_tok" + + "en\030\005 \001(\01323.google.spanner.v1.Multiplexed" + + "SessionPrecommitTokenB\003\340A\001\"\244\002\n\020PartialRe" + + "sultSet\0226\n\010metadata\030\001 \001(\0132$.google.spann" + + "er.v1.ResultSetMetadata\022&\n\006values\030\002 \003(\0132" + + "\026.google.protobuf.Value\022\025\n\rchunked_value" + + "\030\003 \001(\010\022\024\n\014resume_token\030\004 \001(\014\0220\n\005stats\030\005 " + + "\001(\0132!.google.spanner.v1.ResultSetStats\022Q" + + "\n\017precommit_token\030\010 \001(\01323.google.spanner" + + ".v1.MultiplexedSessionPrecommitTokenB\003\340A" + + "\001\"\267\001\n\021ResultSetMetadata\022/\n\010row_type\030\001 \001(" + + "\0132\035.google.spanner.v1.StructType\0223\n\013tran" + + "saction\030\002 \001(\0132\036.google.spanner.v1.Transa" + + "ction\022<\n\025undeclared_parameters\030\003 \001(\0132\035.g" + + "oogle.spanner.v1.StructType\"\271\001\n\016ResultSe" + + "tStats\0220\n\nquery_plan\030\001 \001(\0132\034.google.span" + + "ner.v1.QueryPlan\022,\n\013query_stats\030\002 \001(\0132\027." 
+ + "google.protobuf.Struct\022\031\n\017row_count_exac" + + "t\030\003 \001(\003H\000\022\037\n\025row_count_lower_bound\030\004 \001(\003" + + "H\000B\013\n\trow_countB\264\001\n\025com.google.spanner.v" + + "1B\016ResultSetProtoP\001Z5cloud.google.com/go" + + "/spanner/apiv1/spannerpb;spannerpb\370\001\001\252\002\027" + + "Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\S" + + "panner\\V1\352\002\032Google::Cloud::Spanner::V1b\006" + + "proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), com.google.protobuf.StructProto.getDescriptor(), com.google.spanner.v1.QueryPlanProto.getDescriptor(), com.google.spanner.v1.TransactionProto.getDescriptor(), @@ -98,7 +104,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_v1_ResultSet_descriptor, new java.lang.String[] { - "Metadata", "Rows", "Stats", + "Metadata", "Rows", "Stats", "PrecommitToken", }); internal_static_google_spanner_v1_PartialResultSet_descriptor = getDescriptor().getMessageTypes().get(1); @@ -106,7 +112,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_v1_PartialResultSet_descriptor, new java.lang.String[] { - "Metadata", "Values", "ChunkedValue", "ResumeToken", "Stats", + "Metadata", "Values", "ChunkedValue", "ResumeToken", "Stats", "PrecommitToken", }); internal_static_google_spanner_v1_ResultSetMetadata_descriptor = getDescriptor().getMessageTypes().get(2); @@ -124,6 +130,12 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "QueryPlan", "QueryStats", "RowCountExact", "RowCountLowerBound", "RowCount", }); + 
com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.FieldBehaviorProto.getDescriptor(); com.google.protobuf.StructProto.getDescriptor(); com.google.spanner.v1.QueryPlanProto.getDescriptor(); com.google.spanner.v1.TransactionProto.getDescriptor(); diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetStats.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetStats.java index 0d23f7f8d86..ff87998e87f 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetStats.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetStats.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/result_set.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetStatsOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetStatsOrBuilder.java index f530b8c9fb8..3362ee8e08d 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetStatsOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/ResultSetStatsOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/result_set.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface ResultSetStatsOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RollbackRequest.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RollbackRequest.java index c4ba83281f4..caa8e5d74c9 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RollbackRequest.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RollbackRequest.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RollbackRequestOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RollbackRequestOrBuilder.java index e054a8978f6..2bfdefaf8de 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RollbackRequestOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/RollbackRequestOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface RollbackRequestOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Session.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Session.java index 99a29fabbf3..e4979e05f66 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Session.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Session.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SessionOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SessionOrBuilder.java index f8669e27f67..cefd62dc560 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SessionOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SessionOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface SessionOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SpannerProto.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SpannerProto.java index e035529b2ae..d8679c5b35d 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SpannerProto.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/SpannerProto.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/spanner.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public final class SpannerProto { @@ -236,7 +236,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "aSelection\022\036\n\026auto_failover_disabled\030\002 \001" + "(\010\032f\n\017ExcludeReplicas\022S\n\022replica_selecti" + "ons\030\001 \003(\01327.google.spanner.v1.DirectedRe" - + "adOptions.ReplicaSelectionB\n\n\010replicas\"\307" + + "adOptions.ReplicaSelectionB\n\n\010replicas\"\360" + "\006\n\021ExecuteSqlRequest\0227\n\007session\030\001 \001(\tB&\340" + "A\002\372A \n\036spanner.googleapis.com/Session\022;\n" + "\013transaction\030\002 \001(\0132&.google.spanner.v1.T" @@ -256,174 +256,188 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "ueryOptions\022\031\n\021optimizer_version\030\001 \001(\t\022$" + "\n\034optimizer_statistics_package\030\002 \001(\t\032J\n\017" + "ParamTypesEntry\022\013\n\003key\030\001 \001(\t\022&\n\005value\030\002 " - + "\001(\0132\027.google.spanner.v1.Type:\0028\001\".\n\tQuer" + + "\001(\0132\027.google.spanner.v1.Type:\0028\001\"W\n\tQuer" + "yMode\022\n\n\006NORMAL\020\000\022\010\n\004PLAN\020\001\022\013\n\007PROFILE\020\002" - + "\"\240\004\n\026ExecuteBatchDmlRequest\0227\n\007session\030\001" + + "\022\016\n\nWITH_STATS\020\003\022\027\n\023WITH_PLAN_AND_STATS\020" + + "\004\"\240\004\n\026ExecuteBatchDmlRequest\0227\n\007session\030" + + "\001 \001(\tB&\340A\002\372A \n\036spanner.googleapis.com/Se" + + "ssion\022@\n\013transaction\030\002 \001(\0132&.google.span" + + "ner.v1.TransactionSelectorB\003\340A\002\022L\n\nstate" + + "ments\030\003 \003(\01323.google.spanner.v1.ExecuteB" + + "atchDmlRequest.StatementB\003\340A\002\022\022\n\005seqno\030\004" + + " \001(\003B\003\340A\002\022:\n\017request_options\030\005 \001(\0132!.goo" + + "gle.spanner.v1.RequestOptions\032\354\001\n\tStatem" + + 
"ent\022\020\n\003sql\030\001 \001(\tB\003\340A\002\022\'\n\006params\030\002 \001(\0132\027." + + "google.protobuf.Struct\022X\n\013param_types\030\003 " + + "\003(\0132C.google.spanner.v1.ExecuteBatchDmlR" + + "equest.Statement.ParamTypesEntry\032J\n\017Para" + + "mTypesEntry\022\013\n\003key\030\001 \001(\t\022&\n\005value\030\002 \001(\0132" + + "\027.google.spanner.v1.Type:\0028\001\"\303\001\n\027Execute" + + "BatchDmlResponse\0221\n\013result_sets\030\001 \003(\0132\034." + + "google.spanner.v1.ResultSet\022\"\n\006status\030\002 " + + "\001(\0132\022.google.rpc.Status\022Q\n\017precommit_tok" + + "en\030\003 \001(\01323.google.spanner.v1.Multiplexed" + + "SessionPrecommitTokenB\003\340A\001\"H\n\020PartitionO" + + "ptions\022\034\n\024partition_size_bytes\030\001 \001(\003\022\026\n\016" + + "max_partitions\030\002 \001(\003\"\243\003\n\025PartitionQueryR" + + "equest\0227\n\007session\030\001 \001(\tB&\340A\002\372A \n\036spanner" + + ".googleapis.com/Session\022;\n\013transaction\030\002" + + " \001(\0132&.google.spanner.v1.TransactionSele" + + "ctor\022\020\n\003sql\030\003 \001(\tB\003\340A\002\022\'\n\006params\030\004 \001(\0132\027" + + ".google.protobuf.Struct\022M\n\013param_types\030\005" + + " \003(\01328.google.spanner.v1.PartitionQueryR" + + "equest.ParamTypesEntry\022>\n\021partition_opti" + + "ons\030\006 \001(\0132#.google.spanner.v1.PartitionO" + + "ptions\032J\n\017ParamTypesEntry\022\013\n\003key\030\001 \001(\t\022&" + + "\n\005value\030\002 \001(\0132\027.google.spanner.v1.Type:\002" + + "8\001\"\261\002\n\024PartitionReadRequest\0227\n\007session\030\001" + " \001(\tB&\340A\002\372A \n\036spanner.googleapis.com/Ses" - + "sion\022@\n\013transaction\030\002 \001(\0132&.google.spann" - + "er.v1.TransactionSelectorB\003\340A\002\022L\n\nstatem" - + "ents\030\003 \003(\01323.google.spanner.v1.ExecuteBa" - + "tchDmlRequest.StatementB\003\340A\002\022\022\n\005seqno\030\004 " - + "\001(\003B\003\340A\002\022:\n\017request_options\030\005 
\001(\0132!.goog" - + "le.spanner.v1.RequestOptions\032\354\001\n\tStateme" - + "nt\022\020\n\003sql\030\001 \001(\tB\003\340A\002\022\'\n\006params\030\002 \001(\0132\027.g" - + "oogle.protobuf.Struct\022X\n\013param_types\030\003 \003" - + "(\0132C.google.spanner.v1.ExecuteBatchDmlRe" - + "quest.Statement.ParamTypesEntry\032J\n\017Param" - + "TypesEntry\022\013\n\003key\030\001 \001(\t\022&\n\005value\030\002 \001(\0132\027" - + ".google.spanner.v1.Type:\0028\001\"p\n\027ExecuteBa" - + "tchDmlResponse\0221\n\013result_sets\030\001 \003(\0132\034.go" - + "ogle.spanner.v1.ResultSet\022\"\n\006status\030\002 \001(" - + "\0132\022.google.rpc.Status\"H\n\020PartitionOption" - + "s\022\034\n\024partition_size_bytes\030\001 \001(\003\022\026\n\016max_p" - + "artitions\030\002 \001(\003\"\243\003\n\025PartitionQueryReques" - + "t\0227\n\007session\030\001 \001(\tB&\340A\002\372A \n\036spanner.goog" - + "leapis.com/Session\022;\n\013transaction\030\002 \001(\0132" - + "&.google.spanner.v1.TransactionSelector\022" - + "\020\n\003sql\030\003 \001(\tB\003\340A\002\022\'\n\006params\030\004 \001(\0132\027.goog" - + "le.protobuf.Struct\022M\n\013param_types\030\005 \003(\0132" - + "8.google.spanner.v1.PartitionQueryReques" - + "t.ParamTypesEntry\022>\n\021partition_options\030\006" - + " \001(\0132#.google.spanner.v1.PartitionOption" - + "s\032J\n\017ParamTypesEntry\022\013\n\003key\030\001 \001(\t\022&\n\005val" - + "ue\030\002 \001(\0132\027.google.spanner.v1.Type:\0028\001\"\261\002" - + "\n\024PartitionReadRequest\0227\n\007session\030\001 \001(\tB" - + "&\340A\002\372A \n\036spanner.googleapis.com/Session\022" - + ";\n\013transaction\030\002 \001(\0132&.google.spanner.v1" - + ".TransactionSelector\022\022\n\005table\030\003 \001(\tB\003\340A\002" - + "\022\r\n\005index\030\004 \001(\t\022\017\n\007columns\030\005 \003(\t\022/\n\007key_" - + "set\030\006 \001(\0132\031.google.spanner.v1.KeySetB\003\340A" - + "\002\022>\n\021partition_options\030\t \001(\0132#.google.sp" - 
+ "anner.v1.PartitionOptions\"$\n\tPartition\022\027" - + "\n\017partition_token\030\001 \001(\014\"z\n\021PartitionResp" - + "onse\0220\n\npartitions\030\001 \003(\0132\034.google.spanne" - + "r.v1.Partition\0223\n\013transaction\030\002 \001(\0132\036.go" - + "ogle.spanner.v1.Transaction\"\312\003\n\013ReadRequ" - + "est\0227\n\007session\030\001 \001(\tB&\340A\002\372A \n\036spanner.go" - + "ogleapis.com/Session\022;\n\013transaction\030\002 \001(" - + "\0132&.google.spanner.v1.TransactionSelecto" - + "r\022\022\n\005table\030\003 \001(\tB\003\340A\002\022\r\n\005index\030\004 \001(\t\022\024\n\007" - + "columns\030\005 \003(\tB\003\340A\002\022/\n\007key_set\030\006 \001(\0132\031.go" - + "ogle.spanner.v1.KeySetB\003\340A\002\022\r\n\005limit\030\010 \001" - + "(\003\022\024\n\014resume_token\030\t \001(\014\022\027\n\017partition_to" - + "ken\030\n \001(\014\022:\n\017request_options\030\013 \001(\0132!.goo" - + "gle.spanner.v1.RequestOptions\022E\n\025directe" - + "d_read_options\030\016 \001(\0132&.google.spanner.v1" - + ".DirectedReadOptions\022\032\n\022data_boost_enabl" - + "ed\030\017 \001(\010\"\313\001\n\027BeginTransactionRequest\0227\n\007" - + "session\030\001 \001(\tB&\340A\002\372A \n\036spanner.googleapi" - + "s.com/Session\022;\n\007options\030\002 \001(\0132%.google." - + "spanner.v1.TransactionOptionsB\003\340A\002\022:\n\017re" - + "quest_options\030\003 \001(\0132!.google.spanner.v1." 
- + "RequestOptions\"\375\002\n\rCommitRequest\0227\n\007sess" - + "ion\030\001 \001(\tB&\340A\002\372A \n\036spanner.googleapis.co" - + "m/Session\022\030\n\016transaction_id\030\002 \001(\014H\000\022G\n\026s" - + "ingle_use_transaction\030\003 \001(\0132%.google.spa" - + "nner.v1.TransactionOptionsH\000\022.\n\tmutation" - + "s\030\004 \003(\0132\033.google.spanner.v1.Mutation\022\033\n\023" - + "return_commit_stats\030\005 \001(\010\0228\n\020max_commit_" - + "delay\030\010 \001(\0132\031.google.protobuf.DurationB\003" - + "\340A\001\022:\n\017request_options\030\006 \001(\0132!.google.sp" - + "anner.v1.RequestOptionsB\r\n\013transaction\"g" - + "\n\017RollbackRequest\0227\n\007session\030\001 \001(\tB&\340A\002\372" - + "A \n\036spanner.googleapis.com/Session\022\033\n\016tr" - + "ansaction_id\030\002 \001(\014B\003\340A\002\"\316\002\n\021BatchWriteRe" - + "quest\0227\n\007session\030\001 \001(\tB&\340A\002\372A \n\036spanner." - + "googleapis.com/Session\022:\n\017request_option" - + "s\030\003 \001(\0132!.google.spanner.v1.RequestOptio" - + "ns\022P\n\017mutation_groups\030\004 \003(\01322.google.spa" - + "nner.v1.BatchWriteRequest.MutationGroupB" - + "\003\340A\002\022,\n\037exclude_txn_from_change_streams\030" - + "\005 \001(\010B\003\340A\001\032D\n\rMutationGroup\0223\n\tmutations" - + "\030\001 \003(\0132\033.google.spanner.v1.MutationB\003\340A\002" - + "\"\177\n\022BatchWriteResponse\022\017\n\007indexes\030\001 \003(\005\022" - + "\"\n\006status\030\002 \001(\0132\022.google.rpc.Status\0224\n\020c" - + "ommit_timestamp\030\003 \001(\0132\032.google.protobuf." 
- + "Timestamp2\213\030\n\007Spanner\022\246\001\n\rCreateSession\022" - + "\'.google.spanner.v1.CreateSessionRequest" - + "\032\032.google.spanner.v1.Session\"P\332A\010databas" - + "e\202\323\344\223\002?\":/v1/{database=projects/*/instan" - + "ces/*/databases/*}/sessions:\001*\022\340\001\n\023Batch" - + "CreateSessions\022-.google.spanner.v1.Batch" - + "CreateSessionsRequest\032..google.spanner.v" - + "1.BatchCreateSessionsResponse\"j\332A\026databa" - + "se,session_count\202\323\344\223\002K\"F/v1/{database=pr" - + "ojects/*/instances/*/databases/*}/sessio" - + "ns:batchCreate:\001*\022\227\001\n\nGetSession\022$.googl" - + "e.spanner.v1.GetSessionRequest\032\032.google." - + "spanner.v1.Session\"G\332A\004name\202\323\344\223\002:\0228/v1/{" - + "name=projects/*/instances/*/databases/*/" - + "sessions/*}\022\256\001\n\014ListSessions\022&.google.sp" - + "anner.v1.ListSessionsRequest\032\'.google.sp" - + "anner.v1.ListSessionsResponse\"M\332A\010databa" - + "se\202\323\344\223\002<\022:/v1/{database=projects/*/insta" - + "nces/*/databases/*}/sessions\022\231\001\n\rDeleteS" - + "ession\022\'.google.spanner.v1.DeleteSession" - + "Request\032\026.google.protobuf.Empty\"G\332A\004name" - + "\202\323\344\223\002:*8/v1/{name=projects/*/instances/*" - + "/databases/*/sessions/*}\022\243\001\n\nExecuteSql\022" - + "$.google.spanner.v1.ExecuteSqlRequest\032\034." 
- + "google.spanner.v1.ResultSet\"Q\202\323\344\223\002K\"F/v1" - + "/{session=projects/*/instances/*/databas" - + "es/*/sessions/*}:executeSql:\001*\022\276\001\n\023Execu" - + "teStreamingSql\022$.google.spanner.v1.Execu" - + "teSqlRequest\032#.google.spanner.v1.Partial" - + "ResultSet\"Z\202\323\344\223\002T\"O/v1/{session=projects" - + "/*/instances/*/databases/*/sessions/*}:e" - + "xecuteStreamingSql:\001*0\001\022\300\001\n\017ExecuteBatch" - + "Dml\022).google.spanner.v1.ExecuteBatchDmlR" - + "equest\032*.google.spanner.v1.ExecuteBatchD" - + "mlResponse\"V\202\323\344\223\002P\"K/v1/{session=project" - + "s/*/instances/*/databases/*/sessions/*}:" - + "executeBatchDml:\001*\022\221\001\n\004Read\022\036.google.spa" - + "nner.v1.ReadRequest\032\034.google.spanner.v1." - + "ResultSet\"K\202\323\344\223\002E\"@/v1/{session=projects" - + "/*/instances/*/databases/*/sessions/*}:r" - + "ead:\001*\022\254\001\n\rStreamingRead\022\036.google.spanne" - + "r.v1.ReadRequest\032#.google.spanner.v1.Par" - + "tialResultSet\"T\202\323\344\223\002N\"I/v1/{session=proj" + + "sion\022;\n\013transaction\030\002 \001(\0132&.google.spann" + + "er.v1.TransactionSelector\022\022\n\005table\030\003 \001(\t" + + "B\003\340A\002\022\r\n\005index\030\004 \001(\t\022\017\n\007columns\030\005 \003(\t\022/\n" + + "\007key_set\030\006 \001(\0132\031.google.spanner.v1.KeySe" + + "tB\003\340A\002\022>\n\021partition_options\030\t \001(\0132#.goog" + + "le.spanner.v1.PartitionOptions\"$\n\tPartit" + + "ion\022\027\n\017partition_token\030\001 \001(\014\"z\n\021Partitio" + + "nResponse\0220\n\npartitions\030\001 \003(\0132\034.google.s" + + "panner.v1.Partition\0223\n\013transaction\030\002 \001(\013" + + "2\036.google.spanner.v1.Transaction\"\366\005\n\013Rea" + + "dRequest\0227\n\007session\030\001 \001(\tB&\340A\002\372A \n\036spann" + + "er.googleapis.com/Session\022;\n\013transaction" + + "\030\002 \001(\0132&.google.spanner.v1.TransactionSe" + + "lector\022\022\n\005table\030\003 
\001(\tB\003\340A\002\022\r\n\005index\030\004 \001(" + + "\t\022\024\n\007columns\030\005 \003(\tB\003\340A\002\022/\n\007key_set\030\006 \001(\013" + + "2\031.google.spanner.v1.KeySetB\003\340A\002\022\r\n\005limi" + + "t\030\010 \001(\003\022\024\n\014resume_token\030\t \001(\014\022\027\n\017partiti" + + "on_token\030\n \001(\014\022:\n\017request_options\030\013 \001(\0132" + + "!.google.spanner.v1.RequestOptions\022E\n\025di" + + "rected_read_options\030\016 \001(\0132&.google.spann" + + "er.v1.DirectedReadOptions\022\032\n\022data_boost_" + + "enabled\030\017 \001(\010\022=\n\010order_by\030\020 \001(\0162&.google" + + ".spanner.v1.ReadRequest.OrderByB\003\340A\001\022?\n\t" + + "lock_hint\030\021 \001(\0162\'.google.spanner.v1.Read" + + "Request.LockHintB\003\340A\001\"T\n\007OrderBy\022\030\n\024ORDE" + + "R_BY_UNSPECIFIED\020\000\022\030\n\024ORDER_BY_PRIMARY_K" + + "EY\020\001\022\025\n\021ORDER_BY_NO_ORDER\020\002\"T\n\010LockHint\022" + + "\031\n\025LOCK_HINT_UNSPECIFIED\020\000\022\024\n\020LOCK_HINT_" + + "SHARED\020\001\022\027\n\023LOCK_HINT_EXCLUSIVE\020\002\"\203\002\n\027Be" + + "ginTransactionRequest\0227\n\007session\030\001 \001(\tB&" + + "\340A\002\372A \n\036spanner.googleapis.com/Session\022;" + + "\n\007options\030\002 \001(\0132%.google.spanner.v1.Tran" + + "sactionOptionsB\003\340A\002\022:\n\017request_options\030\003" + + " \001(\0132!.google.spanner.v1.RequestOptions\022" + + "6\n\014mutation_key\030\004 \001(\0132\033.google.spanner.v" + + "1.MutationB\003\340A\001\"\320\003\n\rCommitRequest\0227\n\007ses" + + "sion\030\001 \001(\tB&\340A\002\372A \n\036spanner.googleapis.c" + + "om/Session\022\030\n\016transaction_id\030\002 \001(\014H\000\022G\n\026" + + "single_use_transaction\030\003 \001(\0132%.google.sp" + + "anner.v1.TransactionOptionsH\000\022.\n\tmutatio" + + "ns\030\004 \003(\0132\033.google.spanner.v1.Mutation\022\033\n" + + "\023return_commit_stats\030\005 \001(\010\0228\n\020max_commit" + + "_delay\030\010 
\001(\0132\031.google.protobuf.DurationB" + + "\003\340A\001\022:\n\017request_options\030\006 \001(\0132!.google.s" + + "panner.v1.RequestOptions\022Q\n\017precommit_to" + + "ken\030\t \001(\01323.google.spanner.v1.Multiplexe" + + "dSessionPrecommitTokenB\003\340A\001B\r\n\013transacti" + + "on\"g\n\017RollbackRequest\0227\n\007session\030\001 \001(\tB&" + + "\340A\002\372A \n\036spanner.googleapis.com/Session\022\033" + + "\n\016transaction_id\030\002 \001(\014B\003\340A\002\"\316\002\n\021BatchWri" + + "teRequest\0227\n\007session\030\001 \001(\tB&\340A\002\372A \n\036span" + + "ner.googleapis.com/Session\022:\n\017request_op" + + "tions\030\003 \001(\0132!.google.spanner.v1.RequestO" + + "ptions\022P\n\017mutation_groups\030\004 \003(\01322.google" + + ".spanner.v1.BatchWriteRequest.MutationGr" + + "oupB\003\340A\002\022,\n\037exclude_txn_from_change_stre" + + "ams\030\005 \001(\010B\003\340A\001\032D\n\rMutationGroup\0223\n\tmutat" + + "ions\030\001 \003(\0132\033.google.spanner.v1.MutationB" + + "\003\340A\002\"\177\n\022BatchWriteResponse\022\017\n\007indexes\030\001 " + + "\003(\005\022\"\n\006status\030\002 \001(\0132\022.google.rpc.Status\022" + + "4\n\020commit_timestamp\030\003 \001(\0132\032.google.proto" + + "buf.Timestamp2\213\030\n\007Spanner\022\246\001\n\rCreateSess" + + "ion\022\'.google.spanner.v1.CreateSessionReq" + + "uest\032\032.google.spanner.v1.Session\"P\332A\010dat" + + "abase\202\323\344\223\002?\":/v1/{database=projects/*/in" + + "stances/*/databases/*}/sessions:\001*\022\340\001\n\023B" + + "atchCreateSessions\022-.google.spanner.v1.B" + + "atchCreateSessionsRequest\032..google.spann" + + "er.v1.BatchCreateSessionsResponse\"j\332A\026da" + + "tabase,session_count\202\323\344\223\002K\"F/v1/{databas" + + "e=projects/*/instances/*/databases/*}/se" + + "ssions:batchCreate:\001*\022\227\001\n\nGetSession\022$.g" + + "oogle.spanner.v1.GetSessionRequest\032\032.goo" + + "gle.spanner.v1.Session\"G\332A\004name\202\323\344\223\002:\0228/" + + 
"v1/{name=projects/*/instances/*/database" + + "s/*/sessions/*}\022\256\001\n\014ListSessions\022&.googl" + + "e.spanner.v1.ListSessionsRequest\032\'.googl" + + "e.spanner.v1.ListSessionsResponse\"M\332A\010da" + + "tabase\202\323\344\223\002<\022:/v1/{database=projects/*/i" + + "nstances/*/databases/*}/sessions\022\231\001\n\rDel" + + "eteSession\022\'.google.spanner.v1.DeleteSes" + + "sionRequest\032\026.google.protobuf.Empty\"G\332A\004" + + "name\202\323\344\223\002:*8/v1/{name=projects/*/instanc" + + "es/*/databases/*/sessions/*}\022\243\001\n\nExecute" + + "Sql\022$.google.spanner.v1.ExecuteSqlReques" + + "t\032\034.google.spanner.v1.ResultSet\"Q\202\323\344\223\002K\"" + + "F/v1/{session=projects/*/instances/*/dat" + + "abases/*/sessions/*}:executeSql:\001*\022\276\001\n\023E" + + "xecuteStreamingSql\022$.google.spanner.v1.E" + + "xecuteSqlRequest\032#.google.spanner.v1.Par" + + "tialResultSet\"Z\202\323\344\223\002T\"O/v1/{session=proj" + "ects/*/instances/*/databases/*/sessions/" - + "*}:streamingRead:\001*0\001\022\311\001\n\020BeginTransacti" - + "on\022*.google.spanner.v1.BeginTransactionR" - + "equest\032\036.google.spanner.v1.Transaction\"i" - + "\332A\017session,options\202\323\344\223\002Q\"L/v1/{session=p" - + "rojects/*/instances/*/databases/*/sessio" - + "ns/*}:beginTransaction:\001*\022\353\001\n\006Commit\022 .g" - + "oogle.spanner.v1.CommitRequest\032!.google." 
- + "spanner.v1.CommitResponse\"\233\001\332A session,t" - + "ransaction_id,mutations\332A(session,single" - + "_use_transaction,mutations\202\323\344\223\002G\"B/v1/{s" - + "ession=projects/*/instances/*/databases/" - + "*/sessions/*}:commit:\001*\022\260\001\n\010Rollback\022\".g" - + "oogle.spanner.v1.RollbackRequest\032\026.googl" - + "e.protobuf.Empty\"h\332A\026session,transaction" - + "_id\202\323\344\223\002I\"D/v1/{session=projects/*/insta" - + "nces/*/databases/*/sessions/*}:rollback:" - + "\001*\022\267\001\n\016PartitionQuery\022(.google.spanner.v" - + "1.PartitionQueryRequest\032$.google.spanner" - + ".v1.PartitionResponse\"U\202\323\344\223\002O\"J/v1/{sess" - + "ion=projects/*/instances/*/databases/*/s" - + "essions/*}:partitionQuery:\001*\022\264\001\n\rPartiti" - + "onRead\022\'.google.spanner.v1.PartitionRead" - + "Request\032$.google.spanner.v1.PartitionRes" - + "ponse\"T\202\323\344\223\002N\"I/v1/{session=projects/*/i" - + "nstances/*/databases/*/sessions/*}:parti" - + "tionRead:\001*\022\310\001\n\nBatchWrite\022$.google.span" - + "ner.v1.BatchWriteRequest\032%.google.spanne" - + "r.v1.BatchWriteResponse\"k\332A\027session,muta" - + "tion_groups\202\323\344\223\002K\"F/v1/{session=projects" - + "/*/instances/*/databases/*/sessions/*}:b" - + "atchWrite:\001*0\001\032w\312A\026spanner.googleapis.co" - + "m\322A[https://www.googleapis.com/auth/clou" - + "d-platform,https://www.googleapis.com/au" - + "th/spanner.dataB\221\002\n\025com.google.spanner.v" - + "1B\014SpannerProtoP\001Z5cloud.google.com/go/s" - + "panner/apiv1/spannerpb;spannerpb\252\002\027Googl" - + "e.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanne" - + "r\\V1\352\002\032Google::Cloud::Spanner::V1\352A_\n\037sp" - + "anner.googleapis.com/Database\022 - * `StructType` defines the fields of a [STRUCT][google.spanner.v1.TypeCode.STRUCT] type. + * `StructType` defines the fields of a + * [STRUCT][google.spanner.v1.TypeCode.STRUCT] type. *
    * * Protobuf type {@code google.spanner.v1.StructType} @@ -1071,9 +1072,9 @@ public com.google.spanner.v1.StructType.Field getDefaultInstanceForType() { * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. *
    * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1089,9 +1090,9 @@ public java.util.List getFieldsList() { * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. *
    * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1108,9 +1109,9 @@ public java.util.List getFieldsList() { * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. *
    * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1126,9 +1127,9 @@ public int getFieldsCount() { * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. *
    * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1144,9 +1145,9 @@ public com.google.spanner.v1.StructType.Field getFields(int index) { * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. *
    * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1319,7 +1320,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
    -   * `StructType` defines the fields of a [STRUCT][google.spanner.v1.TypeCode.STRUCT] type.
    +   * `StructType` defines the fields of a
    +   * [STRUCT][google.spanner.v1.TypeCode.STRUCT] type.
        * 
    * * Protobuf type {@code google.spanner.v1.StructType} @@ -1564,9 +1566,9 @@ private void ensureFieldsIsMutable() { * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. *
    * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1585,9 +1587,9 @@ public java.util.List getFieldsList() { * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. *
    * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1606,9 +1608,9 @@ public int getFieldsCount() { * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. *
    * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1627,9 +1629,9 @@ public com.google.spanner.v1.StructType.Field getFields(int index) { * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. *
    * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1654,9 +1656,9 @@ public Builder setFields(int index, com.google.spanner.v1.StructType.Field value * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. *
    * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1679,9 +1681,9 @@ public Builder setFields( * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. *
    * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1706,9 +1708,9 @@ public Builder addFields(com.google.spanner.v1.StructType.Field value) { * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. *
    * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1733,9 +1735,9 @@ public Builder addFields(int index, com.google.spanner.v1.StructType.Field value * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. *
    * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1757,9 +1759,9 @@ public Builder addFields(com.google.spanner.v1.StructType.Field.Builder builderF * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. *
    * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1782,9 +1784,9 @@ public Builder addFields( * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. * * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1807,9 +1809,9 @@ public Builder addAllFields( * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. * * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1831,9 +1833,9 @@ public Builder clearFields() { * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. 
In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. * * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1855,9 +1857,9 @@ public Builder removeFields(int index) { * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. * * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1872,9 +1874,9 @@ public com.google.spanner.v1.StructType.Field.Builder getFieldsBuilder(int index * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. * * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1893,9 +1895,9 @@ public com.google.spanner.v1.StructType.FieldOrBuilder getFieldsOrBuilder(int in * The list of fields that make up this struct. 
Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. * * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1915,9 +1917,9 @@ public com.google.spanner.v1.StructType.FieldOrBuilder getFieldsOrBuilder(int in * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. * * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1933,9 +1935,9 @@ public com.google.spanner.v1.StructType.Field.Builder addFieldsBuilder() { * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. 
In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. * * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -1951,9 +1953,9 @@ public com.google.spanner.v1.StructType.Field.Builder addFieldsBuilder(int index * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. * * * repeated .google.spanner.v1.StructType.Field fields = 1; diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/StructTypeOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/StructTypeOrBuilder.java index f37dcafc632..0f1a899c670 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/StructTypeOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/StructTypeOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/type.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface StructTypeOrBuilder @@ -31,9 +31,9 @@ public interface StructTypeOrBuilder * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. 
In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. * * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -46,9 +46,9 @@ public interface StructTypeOrBuilder * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. * * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -61,9 +61,9 @@ public interface StructTypeOrBuilder * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. * * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -76,9 +76,9 @@ public interface StructTypeOrBuilder * The list of fields that make up this struct. 
Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. * * * repeated .google.spanner.v1.StructType.Field fields = 1; @@ -92,9 +92,9 @@ public interface StructTypeOrBuilder * The list of fields that make up this struct. Order is * significant, because values of this struct type are represented as * lists, where the order of field values matches the order of - * fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - * matches the order of columns in a read request, or the order of - * fields in the `SELECT` clause of a query. + * fields in the [StructType][google.spanner.v1.StructType]. In turn, the + * order of fields matches the order of columns in a read request, or the + * order of fields in the `SELECT` clause of a query. * * * repeated .google.spanner.v1.StructType.Field fields = 1; diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Transaction.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Transaction.java index b70ef9ce06a..5fc6801f4f8 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Transaction.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Transaction.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/transaction.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** @@ -154,6 +154,81 @@ public com.google.protobuf.TimestampOrBuilder getReadTimestampOrBuilder() { : readTimestamp_; } + public static final int PRECOMMIT_TOKEN_FIELD_NUMBER = 3; + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + /** + * + * + *
    +   * A precommit token will be included in the response of a BeginTransaction
    +   * request if the read-write transaction is on a multiplexed session and
    +   * a mutation_key was specified in the
    +   * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +   * request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + * + * @return Whether the precommitToken field is set. + */ + @java.lang.Override + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * + * + *
    +   * A precommit token will be included in the response of a BeginTransaction
    +   * request if the read-write transaction is on a multiplexed session and
    +   * a mutation_key was specified in the
    +   * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +   * request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + * + * @return The precommitToken. + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + /** + * + * + *
    +   * A precommit token will be included in the response of a BeginTransaction
    +   * request if the read-write transaction is on a multiplexed session and
    +   * a mutation_key was specified in the
    +   * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +   * request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + @java.lang.Override + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -174,6 +249,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(2, getReadTimestamp()); } + if (((bitField0_ & 0x00000002) != 0)) { + output.writeMessage(3, getPrecommitToken()); + } getUnknownFields().writeTo(output); } @@ -189,6 +267,9 @@ public int getSerializedSize() { if (((bitField0_ & 0x00000001) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getReadTimestamp()); } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getPrecommitToken()); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -209,6 +290,10 @@ public boolean equals(final java.lang.Object obj) { if (hasReadTimestamp()) { if (!getReadTimestamp().equals(other.getReadTimestamp())) return false; } + if (hasPrecommitToken() != other.hasPrecommitToken()) return false; + if (hasPrecommitToken()) { + if (!getPrecommitToken().equals(other.getPrecommitToken())) return false; + } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -226,6 +311,10 @@ public int hashCode() { hash = (37 * hash) + READ_TIMESTAMP_FIELD_NUMBER; hash = (53 * hash) + getReadTimestamp().hashCode(); } + if (hasPrecommitToken()) { + hash = (37 * hash) + PRECOMMIT_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getPrecommitToken().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -366,6 +455,7 @@ private 
Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { getReadTimestampFieldBuilder(); + getPrecommitTokenFieldBuilder(); } } @@ -379,6 +469,11 @@ public Builder clear() { readTimestampBuilder_.dispose(); readTimestampBuilder_ = null; } + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } return this; } @@ -423,6 +518,11 @@ private void buildPartial0(com.google.spanner.v1.Transaction result) { readTimestampBuilder_ == null ? readTimestamp_ : readTimestampBuilder_.build(); to_bitField0_ |= 0x00000001; } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.precommitToken_ = + precommitTokenBuilder_ == null ? precommitToken_ : precommitTokenBuilder_.build(); + to_bitField0_ |= 0x00000002; + } result.bitField0_ |= to_bitField0_; } @@ -477,6 +577,9 @@ public Builder mergeFrom(com.google.spanner.v1.Transaction other) { if (other.hasReadTimestamp()) { mergeReadTimestamp(other.getReadTimestamp()); } + if (other.hasPrecommitToken()) { + mergePrecommitToken(other.getPrecommitToken()); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -515,6 +618,12 @@ public Builder mergeFrom( bitField0_ |= 0x00000002; break; } // case 18 + case 26: + { + input.readMessage(getPrecommitTokenFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000004; + break; + } // case 26 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -840,6 +949,268 @@ public com.google.protobuf.TimestampOrBuilder getReadTimestampOrBuilder() { return readTimestampBuilder_; } + private com.google.spanner.v1.MultiplexedSessionPrecommitToken precommitToken_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + 
com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + precommitTokenBuilder_; + /** + * + * + *
    +     * A precommit token will be included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + * + * @return Whether the precommitToken field is set. + */ + public boolean hasPrecommitToken() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * + * + *
    +     * A precommit token will be included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + * + * @return The precommitToken. + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken() { + if (precommitTokenBuilder_ == null) { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } else { + return precommitTokenBuilder_.getMessage(); + } + } + /** + * + * + *
    +     * A precommit token will be included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + public Builder setPrecommitToken(com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + precommitToken_ = value; + } else { + precommitTokenBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * A precommit token will be included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + public Builder setPrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder builderForValue) { + if (precommitTokenBuilder_ == null) { + precommitToken_ = builderForValue.build(); + } else { + precommitTokenBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * + * + *
    +     * A precommit token will be included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + public Builder mergePrecommitToken( + com.google.spanner.v1.MultiplexedSessionPrecommitToken value) { + if (precommitTokenBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0) + && precommitToken_ != null + && precommitToken_ + != com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance()) { + getPrecommitTokenBuilder().mergeFrom(value); + } else { + precommitToken_ = value; + } + } else { + precommitTokenBuilder_.mergeFrom(value); + } + if (precommitToken_ != null) { + bitField0_ |= 0x00000004; + onChanged(); + } + return this; + } + /** + * + * + *
    +     * A precommit token will be included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + public Builder clearPrecommitToken() { + bitField0_ = (bitField0_ & ~0x00000004); + precommitToken_ = null; + if (precommitTokenBuilder_ != null) { + precommitTokenBuilder_.dispose(); + precommitTokenBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
    +     * A precommit token will be included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder + getPrecommitTokenBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getPrecommitTokenFieldBuilder().getBuilder(); + } + /** + * + * + *
    +     * A precommit token will be included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + public com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder + getPrecommitTokenOrBuilder() { + if (precommitTokenBuilder_ != null) { + return precommitTokenBuilder_.getMessageOrBuilder(); + } else { + return precommitToken_ == null + ? com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance() + : precommitToken_; + } + } + /** + * + * + *
    +     * A precommit token will be included in the response of a BeginTransaction
    +     * request if the read-write transaction is on a multiplexed session and
    +     * a mutation_key was specified in the
    +     * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +     * The precommit token with the highest sequence number from this transaction
    +     * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +     * request for this transaction.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder> + getPrecommitTokenFieldBuilder() { + if (precommitTokenBuilder_ == null) { + precommitTokenBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.spanner.v1.MultiplexedSessionPrecommitToken, + com.google.spanner.v1.MultiplexedSessionPrecommitToken.Builder, + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder>( + getPrecommitToken(), getParentForChildren(), isClean()); + precommitToken_ = null; + } + return precommitTokenBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptions.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptions.java index fdf2b1e69bc..31688b10739 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptions.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptions.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/transaction.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** @@ -413,6 +413,25 @@ public interface ReadWriteOrBuilder * @return The readLockMode. */ com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode getReadLockMode(); + + /** + * + * + *
    +     * Optional. Clients should pass the transaction ID of the previous
    +     * transaction attempt that was aborted if this transaction is being
    +     * executed on a multiplexed session.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * bytes multiplexed_session_previous_transaction_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The multiplexedSessionPreviousTransactionId. + */ + com.google.protobuf.ByteString getMultiplexedSessionPreviousTransactionId(); } /** * @@ -436,6 +455,7 @@ private ReadWrite(com.google.protobuf.GeneratedMessageV3.Builder builder) { private ReadWrite() { readLockMode_ = 0; + multiplexedSessionPreviousTransactionId_ = com.google.protobuf.ByteString.EMPTY; } @java.lang.Override @@ -672,6 +692,31 @@ public com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode getReadLo : result; } + public static final int MULTIPLEXED_SESSION_PREVIOUS_TRANSACTION_ID_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString multiplexedSessionPreviousTransactionId_ = + com.google.protobuf.ByteString.EMPTY; + /** + * + * + *
    +     * Optional. Clients should pass the transaction ID of the previous
    +     * transaction attempt that was aborted if this transaction is being
    +     * executed on a multiplexed session.
    +     * This feature is not yet supported and will result in an UNIMPLEMENTED
    +     * error.
    +     * 
    + * + * + * bytes multiplexed_session_previous_transaction_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The multiplexedSessionPreviousTransactionId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getMultiplexedSessionPreviousTransactionId() { + return multiplexedSessionPreviousTransactionId_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -692,6 +737,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io .getNumber()) { output.writeEnum(1, readLockMode_); } + if (!multiplexedSessionPreviousTransactionId_.isEmpty()) { + output.writeBytes(2, multiplexedSessionPreviousTransactionId_); + } getUnknownFields().writeTo(output); } @@ -707,6 +755,11 @@ public int getSerializedSize() { .getNumber()) { size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, readLockMode_); } + if (!multiplexedSessionPreviousTransactionId_.isEmpty()) { + size += + com.google.protobuf.CodedOutputStream.computeBytesSize( + 2, multiplexedSessionPreviousTransactionId_); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -724,6 +777,8 @@ public boolean equals(final java.lang.Object obj) { (com.google.spanner.v1.TransactionOptions.ReadWrite) obj; if (readLockMode_ != other.readLockMode_) return false; + if (!getMultiplexedSessionPreviousTransactionId() + .equals(other.getMultiplexedSessionPreviousTransactionId())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -737,6 +792,8 @@ public int hashCode() { hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + READ_LOCK_MODE_FIELD_NUMBER; hash = (53 * hash) + readLockMode_; + hash = (37 * hash) + MULTIPLEXED_SESSION_PREVIOUS_TRANSACTION_ID_FIELD_NUMBER; + hash = (53 * hash) + getMultiplexedSessionPreviousTransactionId().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -880,6 +937,7 
@@ public Builder clear() { super.clear(); bitField0_ = 0; readLockMode_ = 0; + multiplexedSessionPreviousTransactionId_ = com.google.protobuf.ByteString.EMPTY; return this; } @@ -919,6 +977,10 @@ private void buildPartial0(com.google.spanner.v1.TransactionOptions.ReadWrite re if (((from_bitField0_ & 0x00000001) != 0)) { result.readLockMode_ = readLockMode_; } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.multiplexedSessionPreviousTransactionId_ = + multiplexedSessionPreviousTransactionId_; + } } @java.lang.Override @@ -972,6 +1034,11 @@ public Builder mergeFrom(com.google.spanner.v1.TransactionOptions.ReadWrite othe if (other.readLockMode_ != 0) { setReadLockModeValue(other.getReadLockModeValue()); } + if (other.getMultiplexedSessionPreviousTransactionId() + != com.google.protobuf.ByteString.EMPTY) { + setMultiplexedSessionPreviousTransactionId( + other.getMultiplexedSessionPreviousTransactionId()); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -1004,6 +1071,12 @@ public Builder mergeFrom( bitField0_ |= 0x00000001; break; } // case 8 + case 18: + { + multiplexedSessionPreviousTransactionId_ = input.readBytes(); + bitField0_ |= 0x00000002; + break; + } // case 18 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -1122,6 +1195,82 @@ public Builder clearReadLockMode() { return this; } + private com.google.protobuf.ByteString multiplexedSessionPreviousTransactionId_ = + com.google.protobuf.ByteString.EMPTY; + /** + * + * + *
    +       * Optional. Clients should pass the transaction ID of the previous
    +       * transaction attempt that was aborted if this transaction is being
    +       * executed on a multiplexed session.
    +       * This feature is not yet supported and will result in an UNIMPLEMENTED
    +       * error.
    +       * 
    + * + * + * bytes multiplexed_session_previous_transaction_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The multiplexedSessionPreviousTransactionId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getMultiplexedSessionPreviousTransactionId() { + return multiplexedSessionPreviousTransactionId_; + } + /** + * + * + *
    +       * Optional. Clients should pass the transaction ID of the previous
    +       * transaction attempt that was aborted if this transaction is being
    +       * executed on a multiplexed session.
    +       * This feature is not yet supported and will result in an UNIMPLEMENTED
    +       * error.
    +       * 
    + * + * + * bytes multiplexed_session_previous_transaction_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The multiplexedSessionPreviousTransactionId to set. + * @return This builder for chaining. + */ + public Builder setMultiplexedSessionPreviousTransactionId( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + multiplexedSessionPreviousTransactionId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * + * + *
    +       * Optional. Clients should pass the transaction ID of the previous
    +       * transaction attempt that was aborted if this transaction is being
    +       * executed on a multiplexed session.
    +       * This feature is not yet supported and will result in an UNIMPLEMENTED
    +       * error.
    +       * 
    + * + * + * bytes multiplexed_session_previous_transaction_id = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearMultiplexedSessionPreviousTransactionId() { + bitField0_ = (bitField0_ & ~0x00000002); + multiplexedSessionPreviousTransactionId_ = + getDefaultInstance().getMultiplexedSessionPreviousTransactionId(); + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptionsOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptionsOrBuilder.java index 0c0205fb937..92630bc65a3 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptionsOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptionsOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/transaction.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface TransactionOptionsOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOrBuilder.java index 640e7ae9a68..bf2232c24d2 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/transaction.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface TransactionOrBuilder @@ -93,4 +93,63 @@ public interface TransactionOrBuilder * .google.protobuf.Timestamp read_timestamp = 2; */ com.google.protobuf.TimestampOrBuilder getReadTimestampOrBuilder(); + + /** + * + * + *
    +   * A precommit token will be included in the response of a BeginTransaction
    +   * request if the read-write transaction is on a multiplexed session and
    +   * a mutation_key was specified in the
    +   * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +   * request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + * + * @return Whether the precommitToken field is set. + */ + boolean hasPrecommitToken(); + /** + * + * + *
    +   * A precommit token will be included in the response of a BeginTransaction
    +   * request if the read-write transaction is on a multiplexed session and
    +   * a mutation_key was specified in the
    +   * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +   * request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + * + * @return The precommitToken. + */ + com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken(); + /** + * + * + *
    +   * A precommit token will be included in the response of a BeginTransaction
    +   * request if the read-write transaction is on a multiplexed session and
    +   * a mutation_key was specified in the
    +   * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    +   * The precommit token with the highest sequence number from this transaction
    +   * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    +   * request for this transaction.
    +   * This feature is not yet supported and will result in an UNIMPLEMENTED
    +   * error.
    +   * 
    + * + * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; + */ + com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder getPrecommitTokenOrBuilder(); } diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionProto.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionProto.java index b06bd338898..07dbdbbb0b0 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionProto.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionProto.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/transaction.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public final class TransactionProto { @@ -52,6 +52,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_spanner_v1_TransactionSelector_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_spanner_v1_TransactionSelector_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; @@ -62,44 +66,51 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { static { java.lang.String[] descriptorData = { "\n#google/spanner/v1/transaction.proto\022\021g" - + "oogle.spanner.v1\032\036google/protobuf/durati" - + "on.proto\032\037google/protobuf/timestamp.prot" - + "o\"\224\006\n\022TransactionOptions\022E\n\nread_write\030\001" - + " \001(\0132/.google.spanner.v1.TransactionOpti" 
- + "ons.ReadWriteH\000\022O\n\017partitioned_dml\030\003 \001(\013" - + "24.google.spanner.v1.TransactionOptions." - + "PartitionedDmlH\000\022C\n\tread_only\030\002 \001(\0132..go" - + "ogle.spanner.v1.TransactionOptions.ReadO" - + "nlyH\000\022\'\n\037exclude_txn_from_change_streams" - + "\030\005 \001(\010\032\262\001\n\tReadWrite\022T\n\016read_lock_mode\030\001" - + " \001(\0162<.google.spanner.v1.TransactionOpti" - + "ons.ReadWrite.ReadLockMode\"O\n\014ReadLockMo" - + "de\022\036\n\032READ_LOCK_MODE_UNSPECIFIED\020\000\022\017\n\013PE" - + "SSIMISTIC\020\001\022\016\n\nOPTIMISTIC\020\002\032\020\n\016Partition" - + "edDml\032\250\002\n\010ReadOnly\022\020\n\006strong\030\001 \001(\010H\000\0228\n\022" - + "min_read_timestamp\030\002 \001(\0132\032.google.protob" - + "uf.TimestampH\000\0222\n\rmax_staleness\030\003 \001(\0132\031." - + "google.protobuf.DurationH\000\0224\n\016read_times" - + "tamp\030\004 \001(\0132\032.google.protobuf.TimestampH\000" - + "\0224\n\017exact_staleness\030\005 \001(\0132\031.google.proto" - + "buf.DurationH\000\022\035\n\025return_read_timestamp\030" - + "\006 \001(\010B\021\n\017timestamp_boundB\006\n\004mode\"M\n\013Tran" - + "saction\022\n\n\002id\030\001 \001(\014\0222\n\016read_timestamp\030\002 " - + "\001(\0132\032.google.protobuf.Timestamp\"\244\001\n\023Tran" - + "sactionSelector\022;\n\nsingle_use\030\001 \001(\0132%.go" - + "ogle.spanner.v1.TransactionOptionsH\000\022\014\n\002" - + "id\030\002 \001(\014H\000\0226\n\005begin\030\003 \001(\0132%.google.spann" - + "er.v1.TransactionOptionsH\000B\n\n\010selectorB\263" - + "\001\n\025com.google.spanner.v1B\020TransactionPro" - + "toP\001Z5cloud.google.com/go/spanner/apiv1/" - + "spannerpb;spannerpb\252\002\027Google.Cloud.Spann" - + "er.V1\312\002\027Google\\Cloud\\Spanner\\V1\352\002\032Google" - + "::Cloud::Spanner::V1b\006proto3" + + "oogle.spanner.v1\032\037google/api/field_behav" + + "ior.proto\032\036google/protobuf/duration.prot" + + 
"o\032\037google/protobuf/timestamp.proto\"\316\006\n\022T" + + "ransactionOptions\022E\n\nread_write\030\001 \001(\0132/." + + "google.spanner.v1.TransactionOptions.Rea" + + "dWriteH\000\022O\n\017partitioned_dml\030\003 \001(\01324.goog" + + "le.spanner.v1.TransactionOptions.Partiti" + + "onedDmlH\000\022C\n\tread_only\030\002 \001(\0132..google.sp" + + "anner.v1.TransactionOptions.ReadOnlyH\000\022\'" + + "\n\037exclude_txn_from_change_streams\030\005 \001(\010\032" + + "\354\001\n\tReadWrite\022T\n\016read_lock_mode\030\001 \001(\0162<." + + "google.spanner.v1.TransactionOptions.Rea" + + "dWrite.ReadLockMode\0228\n+multiplexed_sessi" + + "on_previous_transaction_id\030\002 \001(\014B\003\340A\001\"O\n" + + "\014ReadLockMode\022\036\n\032READ_LOCK_MODE_UNSPECIF" + + "IED\020\000\022\017\n\013PESSIMISTIC\020\001\022\016\n\nOPTIMISTIC\020\002\032\020" + + "\n\016PartitionedDml\032\250\002\n\010ReadOnly\022\020\n\006strong\030" + + "\001 \001(\010H\000\0228\n\022min_read_timestamp\030\002 \001(\0132\032.go" + + "ogle.protobuf.TimestampH\000\0222\n\rmax_stalene" + + "ss\030\003 \001(\0132\031.google.protobuf.DurationH\000\0224\n" + + "\016read_timestamp\030\004 \001(\0132\032.google.protobuf." 
+ + "TimestampH\000\0224\n\017exact_staleness\030\005 \001(\0132\031.g" + + "oogle.protobuf.DurationH\000\022\035\n\025return_read" + + "_timestamp\030\006 \001(\010B\021\n\017timestamp_boundB\006\n\004m" + + "ode\"\233\001\n\013Transaction\022\n\n\002id\030\001 \001(\014\0222\n\016read_" + + "timestamp\030\002 \001(\0132\032.google.protobuf.Timest" + + "amp\022L\n\017precommit_token\030\003 \001(\01323.google.sp" + + "anner.v1.MultiplexedSessionPrecommitToke" + + "n\"\244\001\n\023TransactionSelector\022;\n\nsingle_use\030" + + "\001 \001(\0132%.google.spanner.v1.TransactionOpt" + + "ionsH\000\022\014\n\002id\030\002 \001(\014H\000\0226\n\005begin\030\003 \001(\0132%.go" + + "ogle.spanner.v1.TransactionOptionsH\000B\n\n\010" + + "selector\"L\n MultiplexedSessionPrecommitT" + + "oken\022\027\n\017precommit_token\030\001 \001(\014\022\017\n\007seq_num" + + "\030\002 \001(\005B\263\001\n\025com.google.spanner.v1B\020Transa" + + "ctionProtoP\001Z5cloud.google.com/go/spanne" + + "r/apiv1/spannerpb;spannerpb\252\002\027Google.Clo" + + "ud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1\352" + + "\002\032Google::Cloud::Spanner::V1b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), com.google.protobuf.DurationProto.getDescriptor(), com.google.protobuf.TimestampProto.getDescriptor(), }); @@ -117,7 +128,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_v1_TransactionOptions_ReadWrite_descriptor, new java.lang.String[] { - "ReadLockMode", + "ReadLockMode", "MultiplexedSessionPreviousTransactionId", }); internal_static_google_spanner_v1_TransactionOptions_PartitionedDml_descriptor = internal_static_google_spanner_v1_TransactionOptions_descriptor.getNestedTypes().get(1); @@ 
-145,7 +156,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_v1_Transaction_descriptor, new java.lang.String[] { - "Id", "ReadTimestamp", + "Id", "ReadTimestamp", "PrecommitToken", }); internal_static_google_spanner_v1_TransactionSelector_descriptor = getDescriptor().getMessageTypes().get(2); @@ -155,6 +166,20 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "SingleUse", "Id", "Begin", "Selector", }); + internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_v1_MultiplexedSessionPrecommitToken_descriptor, + new java.lang.String[] { + "PrecommitToken", "SeqNum", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.FieldBehaviorProto.getDescriptor(); com.google.protobuf.DurationProto.getDescriptor(); com.google.protobuf.TimestampProto.getDescriptor(); } diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionSelector.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionSelector.java index 073e252ca9b..a33d77b45cc 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionSelector.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionSelector.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! 
// source: google/spanner/v1/transaction.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionSelectorOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionSelectorOrBuilder.java index 01f54dc9783..b8f9a516075 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionSelectorOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionSelectorOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/transaction.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface TransactionSelectorOrBuilder diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Type.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Type.java index ffaa2a57877..e1e06ef2cf6 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Type.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Type.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/type.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** @@ -104,8 +104,9 @@ public com.google.spanner.v1.TypeCode getCode() { * * *
    -   * If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
    -   * is the type of the array elements.
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +   * type of the array elements.
        * 
    * * .google.spanner.v1.Type array_element_type = 2; @@ -120,8 +121,9 @@ public boolean hasArrayElementType() { * * *
    -   * If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
    -   * is the type of the array elements.
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +   * type of the array elements.
        * 
    * * .google.spanner.v1.Type array_element_type = 2; @@ -138,8 +140,9 @@ public com.google.spanner.v1.Type getArrayElementType() { * * *
    -   * If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
    -   * is the type of the array elements.
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +   * type of the array elements.
        * 
    * * .google.spanner.v1.Type array_element_type = 2; @@ -157,8 +160,9 @@ public com.google.spanner.v1.TypeOrBuilder getArrayElementTypeOrBuilder() { * * *
    -   * If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
    -   * provides type information for the struct's fields.
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +   * type information for the struct's fields.
        * 
    * * .google.spanner.v1.StructType struct_type = 3; @@ -173,8 +177,9 @@ public boolean hasStructType() { * * *
    -   * If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
    -   * provides type information for the struct's fields.
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +   * type information for the struct's fields.
        * 
    * * .google.spanner.v1.StructType struct_type = 3; @@ -191,8 +196,9 @@ public com.google.spanner.v1.StructType getStructType() { * * *
    -   * If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
    -   * provides type information for the struct's fields.
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +   * type information for the struct's fields.
        * 
    * * .google.spanner.v1.StructType struct_type = 3; @@ -210,12 +216,14 @@ public com.google.spanner.v1.StructTypeOrBuilder getStructTypeOrBuilder() { * * *
    -   * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that disambiguates SQL type that Spanner will
    -   * use to represent values of this type during query processing. This is
    -   * necessary for some type codes because a single [TypeCode][google.spanner.v1.TypeCode] can be mapped
    -   * to different SQL types depending on the SQL dialect. [type_annotation][google.spanner.v1.Type.type_annotation]
    -   * typically is not needed to process the content of a value (it doesn't
    -   * affect serialization) and clients can ignore it on the read path.
    +   * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +   * disambiguates SQL type that Spanner will use to represent values of this
    +   * type during query processing. This is necessary for some type codes because
    +   * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +   * SQL types depending on the SQL dialect.
    +   * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +   * needed to process the content of a value (it doesn't affect serialization)
    +   * and clients can ignore it on the read path.
        * 
    * * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; @@ -230,12 +238,14 @@ public int getTypeAnnotationValue() { * * *
    -   * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that disambiguates SQL type that Spanner will
    -   * use to represent values of this type during query processing. This is
    -   * necessary for some type codes because a single [TypeCode][google.spanner.v1.TypeCode] can be mapped
    -   * to different SQL types depending on the SQL dialect. [type_annotation][google.spanner.v1.Type.type_annotation]
    -   * typically is not needed to process the content of a value (it doesn't
    -   * affect serialization) and clients can ignore it on the read path.
    +   * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +   * disambiguates SQL type that Spanner will use to represent values of this
    +   * type during query processing. This is necessary for some type codes because
    +   * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +   * SQL types depending on the SQL dialect.
    +   * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +   * needed to process the content of a value (it doesn't affect serialization)
    +   * and clients can ignore it on the read path.
        * 
    * * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; @@ -867,8 +877,9 @@ public Builder clearCode() { * * *
    -     * If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
    -     * is the type of the array elements.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
          * 
    * * .google.spanner.v1.Type array_element_type = 2; @@ -882,8 +893,9 @@ public boolean hasArrayElementType() { * * *
    -     * If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
    -     * is the type of the array elements.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
          * 
    * * .google.spanner.v1.Type array_element_type = 2; @@ -903,8 +915,9 @@ public com.google.spanner.v1.Type getArrayElementType() { * * *
    -     * If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
    -     * is the type of the array elements.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
          * 
    * * .google.spanner.v1.Type array_element_type = 2; @@ -926,8 +939,9 @@ public Builder setArrayElementType(com.google.spanner.v1.Type value) { * * *
    -     * If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
    -     * is the type of the array elements.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
          * 
    * * .google.spanner.v1.Type array_element_type = 2; @@ -946,8 +960,9 @@ public Builder setArrayElementType(com.google.spanner.v1.Type.Builder builderFor * * *
    -     * If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
    -     * is the type of the array elements.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
          * 
    * * .google.spanner.v1.Type array_element_type = 2; @@ -974,8 +989,9 @@ public Builder mergeArrayElementType(com.google.spanner.v1.Type value) { * * *
    -     * If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
    -     * is the type of the array elements.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
          * 
    * * .google.spanner.v1.Type array_element_type = 2; @@ -994,8 +1010,9 @@ public Builder clearArrayElementType() { * * *
    -     * If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
    -     * is the type of the array elements.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
          * 
    * * .google.spanner.v1.Type array_element_type = 2; @@ -1009,8 +1026,9 @@ public com.google.spanner.v1.Type.Builder getArrayElementTypeBuilder() { * * *
    -     * If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
    -     * is the type of the array elements.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
          * 
    * * .google.spanner.v1.Type array_element_type = 2; @@ -1028,8 +1046,9 @@ public com.google.spanner.v1.TypeOrBuilder getArrayElementTypeOrBuilder() { * * *
    -     * If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
    -     * is the type of the array elements.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +     * type of the array elements.
          * 
    * * .google.spanner.v1.Type array_element_type = 2; @@ -1061,8 +1080,9 @@ public com.google.spanner.v1.TypeOrBuilder getArrayElementTypeOrBuilder() { * * *
    -     * If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
    -     * provides type information for the struct's fields.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
          * 
    * * .google.spanner.v1.StructType struct_type = 3; @@ -1076,8 +1096,9 @@ public boolean hasStructType() { * * *
    -     * If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
    -     * provides type information for the struct's fields.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
          * 
    * * .google.spanner.v1.StructType struct_type = 3; @@ -1097,8 +1118,9 @@ public com.google.spanner.v1.StructType getStructType() { * * *
    -     * If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
    -     * provides type information for the struct's fields.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
          * 
    * * .google.spanner.v1.StructType struct_type = 3; @@ -1120,8 +1142,9 @@ public Builder setStructType(com.google.spanner.v1.StructType value) { * * *
    -     * If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
    -     * provides type information for the struct's fields.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
          * 
    * * .google.spanner.v1.StructType struct_type = 3; @@ -1140,8 +1163,9 @@ public Builder setStructType(com.google.spanner.v1.StructType.Builder builderFor * * *
    -     * If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
    -     * provides type information for the struct's fields.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
          * 
    * * .google.spanner.v1.StructType struct_type = 3; @@ -1168,8 +1192,9 @@ public Builder mergeStructType(com.google.spanner.v1.StructType value) { * * *
    -     * If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
    -     * provides type information for the struct's fields.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
          * 
    * * .google.spanner.v1.StructType struct_type = 3; @@ -1188,8 +1213,9 @@ public Builder clearStructType() { * * *
    -     * If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
    -     * provides type information for the struct's fields.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
          * 
    * * .google.spanner.v1.StructType struct_type = 3; @@ -1203,8 +1229,9 @@ public com.google.spanner.v1.StructType.Builder getStructTypeBuilder() { * * *
    -     * If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
    -     * provides type information for the struct's fields.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
          * 
    * * .google.spanner.v1.StructType struct_type = 3; @@ -1222,8 +1249,9 @@ public com.google.spanner.v1.StructTypeOrBuilder getStructTypeOrBuilder() { * * *
    -     * If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
    -     * provides type information for the struct's fields.
    +     * If [code][google.spanner.v1.Type.code] ==
    +     * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +     * type information for the struct's fields.
          * 
    * * .google.spanner.v1.StructType struct_type = 3; @@ -1250,12 +1278,14 @@ public com.google.spanner.v1.StructTypeOrBuilder getStructTypeOrBuilder() { * * *
    -     * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that disambiguates SQL type that Spanner will
    -     * use to represent values of this type during query processing. This is
    -     * necessary for some type codes because a single [TypeCode][google.spanner.v1.TypeCode] can be mapped
    -     * to different SQL types depending on the SQL dialect. [type_annotation][google.spanner.v1.Type.type_annotation]
    -     * typically is not needed to process the content of a value (it doesn't
    -     * affect serialization) and clients can ignore it on the read path.
    +     * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +     * disambiguates SQL type that Spanner will use to represent values of this
    +     * type during query processing. This is necessary for some type codes because
    +     * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +     * SQL types depending on the SQL dialect.
    +     * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +     * needed to process the content of a value (it doesn't affect serialization)
    +     * and clients can ignore it on the read path.
          * 
    * * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; @@ -1270,12 +1300,14 @@ public int getTypeAnnotationValue() { * * *
    -     * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that disambiguates SQL type that Spanner will
    -     * use to represent values of this type during query processing. This is
    -     * necessary for some type codes because a single [TypeCode][google.spanner.v1.TypeCode] can be mapped
    -     * to different SQL types depending on the SQL dialect. [type_annotation][google.spanner.v1.Type.type_annotation]
    -     * typically is not needed to process the content of a value (it doesn't
    -     * affect serialization) and clients can ignore it on the read path.
    +     * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +     * disambiguates SQL type that Spanner will use to represent values of this
    +     * type during query processing. This is necessary for some type codes because
    +     * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +     * SQL types depending on the SQL dialect.
    +     * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +     * needed to process the content of a value (it doesn't affect serialization)
    +     * and clients can ignore it on the read path.
          * 
    * * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; @@ -1293,12 +1325,14 @@ public Builder setTypeAnnotationValue(int value) { * * *
    -     * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that disambiguates SQL type that Spanner will
    -     * use to represent values of this type during query processing. This is
    -     * necessary for some type codes because a single [TypeCode][google.spanner.v1.TypeCode] can be mapped
    -     * to different SQL types depending on the SQL dialect. [type_annotation][google.spanner.v1.Type.type_annotation]
    -     * typically is not needed to process the content of a value (it doesn't
    -     * affect serialization) and clients can ignore it on the read path.
    +     * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +     * disambiguates SQL type that Spanner will use to represent values of this
    +     * type during query processing. This is necessary for some type codes because
    +     * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +     * SQL types depending on the SQL dialect.
    +     * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +     * needed to process the content of a value (it doesn't affect serialization)
    +     * and clients can ignore it on the read path.
          * 
    * * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; @@ -1315,12 +1349,14 @@ public com.google.spanner.v1.TypeAnnotationCode getTypeAnnotation() { * * *
    -     * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that disambiguates SQL type that Spanner will
    -     * use to represent values of this type during query processing. This is
    -     * necessary for some type codes because a single [TypeCode][google.spanner.v1.TypeCode] can be mapped
    -     * to different SQL types depending on the SQL dialect. [type_annotation][google.spanner.v1.Type.type_annotation]
    -     * typically is not needed to process the content of a value (it doesn't
    -     * affect serialization) and clients can ignore it on the read path.
    +     * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +     * disambiguates SQL type that Spanner will use to represent values of this
    +     * type during query processing. This is necessary for some type codes because
    +     * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +     * SQL types depending on the SQL dialect.
    +     * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +     * needed to process the content of a value (it doesn't affect serialization)
    +     * and clients can ignore it on the read path.
          * 
    * * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; @@ -1341,12 +1377,14 @@ public Builder setTypeAnnotation(com.google.spanner.v1.TypeAnnotationCode value) * * *
    -     * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that disambiguates SQL type that Spanner will
    -     * use to represent values of this type during query processing. This is
    -     * necessary for some type codes because a single [TypeCode][google.spanner.v1.TypeCode] can be mapped
    -     * to different SQL types depending on the SQL dialect. [type_annotation][google.spanner.v1.Type.type_annotation]
    -     * typically is not needed to process the content of a value (it doesn't
    -     * affect serialization) and clients can ignore it on the read path.
    +     * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +     * disambiguates SQL type that Spanner will use to represent values of this
    +     * type during query processing. This is necessary for some type codes because
    +     * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +     * SQL types depending on the SQL dialect.
    +     * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +     * needed to process the content of a value (it doesn't affect serialization)
    +     * and clients can ignore it on the read path.
          * 
    * * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeAnnotationCode.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeAnnotationCode.java index b8879900205..6cbae9bdd1c 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeAnnotationCode.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeAnnotationCode.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/type.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** @@ -48,11 +48,12 @@ public enum TypeAnnotationCode implements com.google.protobuf.ProtocolMessageEnu * *
        * PostgreSQL compatible NUMERIC type. This annotation needs to be applied to
    -   * [Type][google.spanner.v1.Type] instances having [NUMERIC][google.spanner.v1.TypeCode.NUMERIC]
    -   * type code to specify that values of this type should be treated as
    -   * PostgreSQL NUMERIC values. Currently this annotation is always needed for
    -   * [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] when a client interacts with PostgreSQL-enabled
    -   * Spanner databases.
    +   * [Type][google.spanner.v1.Type] instances having
    +   * [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] type code to specify that
    +   * values of this type should be treated as PostgreSQL NUMERIC values.
    +   * Currently this annotation is always needed for
    +   * [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] when a client interacts with
    +   * PostgreSQL-enabled Spanner databases.
        * 
    * * PG_NUMERIC = 2; @@ -63,11 +64,11 @@ public enum TypeAnnotationCode implements com.google.protobuf.ProtocolMessageEnu * *
        * PostgreSQL compatible JSONB type. This annotation needs to be applied to
    -   * [Type][google.spanner.v1.Type] instances having [JSON][google.spanner.v1.TypeCode.JSON]
    -   * type code to specify that values of this type should be treated as
    -   * PostgreSQL JSONB values. Currently this annotation is always needed for
    -   * [JSON][google.spanner.v1.TypeCode.JSON] when a client interacts with PostgreSQL-enabled
    -   * Spanner databases.
    +   * [Type][google.spanner.v1.Type] instances having
    +   * [JSON][google.spanner.v1.TypeCode.JSON] type code to specify that values of
    +   * this type should be treated as PostgreSQL JSONB values. Currently this
    +   * annotation is always needed for [JSON][google.spanner.v1.TypeCode.JSON]
    +   * when a client interacts with PostgreSQL-enabled Spanner databases.
        * 
    * * PG_JSONB = 3; @@ -103,11 +104,12 @@ public enum TypeAnnotationCode implements com.google.protobuf.ProtocolMessageEnu * *
        * PostgreSQL compatible NUMERIC type. This annotation needs to be applied to
    -   * [Type][google.spanner.v1.Type] instances having [NUMERIC][google.spanner.v1.TypeCode.NUMERIC]
    -   * type code to specify that values of this type should be treated as
    -   * PostgreSQL NUMERIC values. Currently this annotation is always needed for
    -   * [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] when a client interacts with PostgreSQL-enabled
    -   * Spanner databases.
    +   * [Type][google.spanner.v1.Type] instances having
    +   * [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] type code to specify that
    +   * values of this type should be treated as PostgreSQL NUMERIC values.
    +   * Currently this annotation is always needed for
    +   * [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] when a client interacts with
    +   * PostgreSQL-enabled Spanner databases.
        * 
    * * PG_NUMERIC = 2; @@ -118,11 +120,11 @@ public enum TypeAnnotationCode implements com.google.protobuf.ProtocolMessageEnu * *
        * PostgreSQL compatible JSONB type. This annotation needs to be applied to
    -   * [Type][google.spanner.v1.Type] instances having [JSON][google.spanner.v1.TypeCode.JSON]
    -   * type code to specify that values of this type should be treated as
    -   * PostgreSQL JSONB values. Currently this annotation is always needed for
    -   * [JSON][google.spanner.v1.TypeCode.JSON] when a client interacts with PostgreSQL-enabled
    -   * Spanner databases.
    +   * [Type][google.spanner.v1.Type] instances having
    +   * [JSON][google.spanner.v1.TypeCode.JSON] type code to specify that values of
    +   * this type should be treated as PostgreSQL JSONB values. Currently this
    +   * annotation is always needed for [JSON][google.spanner.v1.TypeCode.JSON]
    +   * when a client interacts with PostgreSQL-enabled Spanner databases.
        * 
    * * PG_JSONB = 3; diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeCode.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeCode.java index 43b24d84221..9c36f6c971b 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeCode.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeCode.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/type.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; /** @@ -163,14 +163,14 @@ public enum TypeCode implements com.google.protobuf.ProtocolMessageEnum { * *
        * Encoded as `string`, in decimal format or scientific notation format.
    -   * <br>Decimal format:
    -   * <br>`[+-]Digits[.[Digits]]` or
    -   * <br>`[+-][Digits].Digits`
    +   * Decimal format:
    +   * `[+-]Digits[.[Digits]]` or
    +   * `[+-][Digits].Digits`
        *
        * Scientific notation:
    -   * <br>`[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]` or
    -   * <br>`[+-][Digits].Digits[ExponentIndicator[+-]Digits]`
    -   * <br>(ExponentIndicator is `"e"` or `"E"`)
    +   * `[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]` or
    +   * `[+-][Digits].Digits[ExponentIndicator[+-]Digits]`
    +   * (ExponentIndicator is `"e"` or `"E"`)
        * 
    * * NUMERIC = 10; @@ -214,6 +214,20 @@ public enum TypeCode implements com.google.protobuf.ProtocolMessageEnum { * ENUM = 14; */ ENUM(14), + /** + * + * + *
    +   * Encoded as `string`, in `ISO8601` duration format -
    +   * `P[n]Y[n]M[n]DT[n]H[n]M[n[.fraction]]S`
    +   * where `n` is an integer.
    +   * For example, `P1Y2M3DT4H5M6.5S` represents time duration of 1 year, 2
    +   * months, 3 days, 4 hours, 5 minutes, and 6.5 seconds.
    +   * 
    + * + * INTERVAL = 16; + */ + INTERVAL(16), UNRECOGNIZED(-1), ; @@ -345,14 +359,14 @@ public enum TypeCode implements com.google.protobuf.ProtocolMessageEnum { * *
        * Encoded as `string`, in decimal format or scientific notation format.
    -   * <br>Decimal format:
    -   * <br>`[+-]Digits[.[Digits]]` or
    -   * <br>`[+-][Digits].Digits`
    +   * Decimal format:
    +   * `[+-]Digits[.[Digits]]` or
    +   * `[+-][Digits].Digits`
        *
        * Scientific notation:
    -   * <br>`[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]` or
    -   * <br>`[+-][Digits].Digits[ExponentIndicator[+-]Digits]`
    -   * <br>(ExponentIndicator is `"e"` or `"E"`)
    +   * `[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]` or
    +   * `[+-][Digits].Digits[ExponentIndicator[+-]Digits]`
    +   * (ExponentIndicator is `"e"` or `"E"`)
        * 
    * * NUMERIC = 10; @@ -396,6 +410,20 @@ public enum TypeCode implements com.google.protobuf.ProtocolMessageEnum { * ENUM = 14; */ public static final int ENUM_VALUE = 14; + /** + * + * + *
    +   * Encoded as `string`, in `ISO8601` duration format -
    +   * `P[n]Y[n]M[n]DT[n]H[n]M[n[.fraction]]S`
    +   * where `n` is an integer.
    +   * For example, `P1Y2M3DT4H5M6.5S` represents time duration of 1 year, 2
    +   * months, 3 days, 4 hours, 5 minutes, and 6.5 seconds.
    +   * 
    + * + * INTERVAL = 16; + */ + public static final int INTERVAL_VALUE = 16; public final int getNumber() { if (this == UNRECOGNIZED) { @@ -451,6 +479,8 @@ public static TypeCode forNumber(int value) { return PROTO; case 14: return ENUM; + case 16: + return INTERVAL; default: return null; } diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeOrBuilder.java index 6ab086bb111..e93e4976c57 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeOrBuilder.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/type.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public interface TypeOrBuilder @@ -53,8 +53,9 @@ public interface TypeOrBuilder * * *
    -   * If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
    -   * is the type of the array elements.
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +   * type of the array elements.
        * 
    * * .google.spanner.v1.Type array_element_type = 2; @@ -66,8 +67,9 @@ public interface TypeOrBuilder * * *
    -   * If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
    -   * is the type of the array elements.
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +   * type of the array elements.
        * 
    * * .google.spanner.v1.Type array_element_type = 2; @@ -79,8 +81,9 @@ public interface TypeOrBuilder * * *
    -   * If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
    -   * is the type of the array elements.
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the
    +   * type of the array elements.
        * 
    * * .google.spanner.v1.Type array_element_type = 2; @@ -91,8 +94,9 @@ public interface TypeOrBuilder * * *
    -   * If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
    -   * provides type information for the struct's fields.
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +   * type information for the struct's fields.
        * 
    * * .google.spanner.v1.StructType struct_type = 3; @@ -104,8 +108,9 @@ public interface TypeOrBuilder * * *
    -   * If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
    -   * provides type information for the struct's fields.
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +   * type information for the struct's fields.
        * 
    * * .google.spanner.v1.StructType struct_type = 3; @@ -117,8 +122,9 @@ public interface TypeOrBuilder * * *
    -   * If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
    -   * provides type information for the struct's fields.
    +   * If [code][google.spanner.v1.Type.code] ==
    +   * [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides
    +   * type information for the struct's fields.
        * 
    * * .google.spanner.v1.StructType struct_type = 3; @@ -129,12 +135,14 @@ public interface TypeOrBuilder * * *
    -   * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that disambiguates SQL type that Spanner will
    -   * use to represent values of this type during query processing. This is
    -   * necessary for some type codes because a single [TypeCode][google.spanner.v1.TypeCode] can be mapped
    -   * to different SQL types depending on the SQL dialect. [type_annotation][google.spanner.v1.Type.type_annotation]
    -   * typically is not needed to process the content of a value (it doesn't
    -   * affect serialization) and clients can ignore it on the read path.
    +   * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +   * disambiguates SQL type that Spanner will use to represent values of this
    +   * type during query processing. This is necessary for some type codes because
    +   * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +   * SQL types depending on the SQL dialect.
    +   * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +   * needed to process the content of a value (it doesn't affect serialization)
    +   * and clients can ignore it on the read path.
        * 
    * * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; @@ -146,12 +154,14 @@ public interface TypeOrBuilder * * *
    -   * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that disambiguates SQL type that Spanner will
    -   * use to represent values of this type during query processing. This is
    -   * necessary for some type codes because a single [TypeCode][google.spanner.v1.TypeCode] can be mapped
    -   * to different SQL types depending on the SQL dialect. [type_annotation][google.spanner.v1.Type.type_annotation]
    -   * typically is not needed to process the content of a value (it doesn't
    -   * affect serialization) and clients can ignore it on the read path.
    +   * The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that
    +   * disambiguates SQL type that Spanner will use to represent values of this
    +   * type during query processing. This is necessary for some type codes because
    +   * a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different
    +   * SQL types depending on the SQL dialect.
    +   * [type_annotation][google.spanner.v1.Type.type_annotation] typically is not
    +   * needed to process the content of a value (it doesn't affect serialization)
    +   * and clients can ignore it on the read path.
        * 
    * * .google.spanner.v1.TypeAnnotationCode type_annotation = 4; diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeProto.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeProto.java index 2306effc446..3c41d2585b4 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeProto.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TypeProto.java @@ -16,7 +16,7 @@ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/spanner/v1/type.proto -// Protobuf Java Version: 3.25.3 +// Protobuf Java Version: 3.25.5 package com.google.spanner.v1; public final class TypeProto { @@ -60,19 +60,20 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "pe_fqn\030\005 \001(\t\"\177\n\nStructType\0223\n\006fields\030\001 \003" + "(\0132#.google.spanner.v1.StructType.Field\032" + "<\n\005Field\022\014\n\004name\030\001 \001(\t\022%\n\004type\030\002 \001(\0132\027.g" - + "oogle.spanner.v1.Type*\307\001\n\010TypeCode\022\031\n\025TY" + + "oogle.spanner.v1.Type*\325\001\n\010TypeCode\022\031\n\025TY" + "PE_CODE_UNSPECIFIED\020\000\022\010\n\004BOOL\020\001\022\t\n\005INT64" + "\020\002\022\013\n\007FLOAT64\020\003\022\013\n\007FLOAT32\020\017\022\r\n\tTIMESTAM" + "P\020\004\022\010\n\004DATE\020\005\022\n\n\006STRING\020\006\022\t\n\005BYTES\020\007\022\t\n\005" + "ARRAY\020\010\022\n\n\006STRUCT\020\t\022\013\n\007NUMERIC\020\n\022\010\n\004JSON" - + "\020\013\022\t\n\005PROTO\020\r\022\010\n\004ENUM\020\016*d\n\022TypeAnnotatio" - + "nCode\022$\n TYPE_ANNOTATION_CODE_UNSPECIFIE" - + "D\020\000\022\016\n\nPG_NUMERIC\020\002\022\014\n\010PG_JSONB\020\003\022\n\n\006PG_" - + "OID\020\004B\254\001\n\025com.google.spanner.v1B\tTypePro" - + "toP\001Z5cloud.google.com/go/spanner/apiv1/" - + "spannerpb;spannerpb\252\002\027Google.Cloud.Spann" - + "er.V1\312\002\027Google\\Cloud\\Spanner\\V1\352\002\032Google" - + 
"::Cloud::Spanner::V1b\006proto3" + + "\020\013\022\t\n\005PROTO\020\r\022\010\n\004ENUM\020\016\022\014\n\010INTERVAL\020\020*d\n" + + "\022TypeAnnotationCode\022$\n TYPE_ANNOTATION_C" + + "ODE_UNSPECIFIED\020\000\022\016\n\nPG_NUMERIC\020\002\022\014\n\010PG_" + + "JSONB\020\003\022\n\n\006PG_OID\020\004B\254\001\n\025com.google.spann" + + "er.v1B\tTypeProtoP\001Z5cloud.google.com/go/" + + "spanner/apiv1/spannerpb;spannerpb\252\002\027Goog" + + "le.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spann" + + "er\\V1\352\002\032Google::Cloud::Spanner::V1b\006prot" + + "o3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( diff --git a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/commit_response.proto b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/commit_response.proto index 436a002b86f..d5f9b15d5b3 100644 --- a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/commit_response.proto +++ b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/commit_response.proto @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,6 +17,7 @@ syntax = "proto3"; package google.spanner.v1; import "google/protobuf/timestamp.proto"; +import "google/spanner/v1/transaction.proto"; option csharp_namespace = "Google.Cloud.Spanner.V1"; option go_package = "cloud.google.com/go/spanner/apiv1/spannerpb;spannerpb"; @@ -47,4 +48,12 @@ message CommitResponse { // For more information, see // [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats]. CommitStats commit_stats = 2; + + // Clients should examine and retry the commit if any of the following + // reasons are populated. + oneof MultiplexedSessionRetry { + // If specified, transaction has not committed yet. 
+ // Clients must retry the commit with the new precommit token. + MultiplexedSessionPrecommitToken precommit_token = 4; + } } diff --git a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/keys.proto b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/keys.proto index 8fb4757f5bb..82f073b964f 100644 --- a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/keys.proto +++ b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/keys.proto @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/mutation.proto b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/mutation.proto index cced61f33b3..7fbf93f8a97 100644 --- a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/mutation.proto +++ b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/mutation.proto @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/query_plan.proto b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/query_plan.proto index c0903bdd7d3..ba18055e33e 100644 --- a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/query_plan.proto +++ b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/query_plan.proto @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/result_set.proto b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/result_set.proto index cfa5719c4a2..0b8aabf8679 100644 --- a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/result_set.proto +++ b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/result_set.proto @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ syntax = "proto3"; package google.spanner.v1; +import "google/api/field_behavior.proto"; import "google/protobuf/struct.proto"; import "google/spanner/v1/query_plan.proto"; import "google/spanner/v1/transaction.proto"; @@ -53,6 +54,16 @@ message ResultSet { // Other fields may or may not be populated, based on the // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]. ResultSetStats stats = 3; + + // Optional. A precommit token will be included if the read-write transaction + // is on a multiplexed session. + // The precommit token with the highest sequence number from this transaction + // attempt should be passed to the + // [Commit][google.spanner.v1.Spanner.Commit] request for this transaction. + // This feature is not yet supported and will result in an UNIMPLEMENTED + // error. + MultiplexedSessionPrecommitToken precommit_token = 5 + [(google.api.field_behavior) = OPTIONAL]; } // Partial results from a streaming read or SQL query. Streaming reads and @@ -157,6 +168,16 @@ message PartialResultSet { // This field will also be present in the last response for DML // statements. ResultSetStats stats = 5; + + // Optional. A precommit token will be included if the read-write transaction + // is on a multiplexed session. 
+ // The precommit token with the highest sequence number from this transaction + // attempt should be passed to the + // [Commit][google.spanner.v1.Spanner.Commit] request for this transaction. + // This feature is not yet supported and will result in an UNIMPLEMENTED + // error. + MultiplexedSessionPrecommitToken precommit_token = 8 + [(google.api.field_behavior) = OPTIONAL]; } // Metadata about a [ResultSet][google.spanner.v1.ResultSet] or [PartialResultSet][google.spanner.v1.PartialResultSet]. diff --git a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/spanner.proto b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/spanner.proto index 440ebf78546..847815464a4 100644 --- a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/spanner.proto +++ b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/spanner.proto @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -628,9 +628,19 @@ message ExecuteSqlRequest { // execution statistics information. PLAN = 1; - // This mode returns both the query plan and the execution statistics along - // with the results. + // This mode returns the query plan, overall execution statistics, + // operator level execution statistics along with the results. This has a + // performance overhead compared to the other modes. It is not recommended + // to use this mode for production traffic. PROFILE = 2; + + // This mode returns the overall (but not operator-level) execution + // statistics along with the results. + WITH_STATS = 3; + + // This mode returns the query plan, overall (but not operator-level) + // execution statistics along with the results. + WITH_PLAN_AND_STATS = 4; } // Query optimizer configuration. 
@@ -899,6 +909,16 @@ message ExecuteBatchDmlResponse { // If all DML statements are executed successfully, the status is `OK`. // Otherwise, the error status of the first failed statement. google.rpc.Status status = 2; + + // Optional. A precommit token will be included if the read-write transaction + // is on a multiplexed session. + // The precommit token with the highest sequence number from this transaction + // attempt should be passed to the + // [Commit][google.spanner.v1.Spanner.Commit] request for this transaction. + // This feature is not yet supported and will result in an UNIMPLEMENTED + // error. + MultiplexedSessionPrecommitToken precommit_token = 3 + [(google.api.field_behavior) = OPTIONAL]; } // Options for a PartitionQueryRequest and @@ -1043,6 +1063,71 @@ message PartitionResponse { // The request for [Read][google.spanner.v1.Spanner.Read] and // [StreamingRead][google.spanner.v1.Spanner.StreamingRead]. message ReadRequest { + // An option to control the order in which rows are returned from a read. + enum OrderBy { + // Default value. + // + // ORDER_BY_UNSPECIFIED is equivalent to ORDER_BY_PRIMARY_KEY. + ORDER_BY_UNSPECIFIED = 0; + + // Read rows are returned in primary key order. + // + // In the event that this option is used in conjunction with the + // `partition_token` field, the API will return an `INVALID_ARGUMENT` error. + ORDER_BY_PRIMARY_KEY = 1; + + // Read rows are returned in any order. + ORDER_BY_NO_ORDER = 2; + } + + // A lock hint mechanism for reads done within a transaction. + enum LockHint { + // Default value. + // + // LOCK_HINT_UNSPECIFIED is equivalent to LOCK_HINT_SHARED. + LOCK_HINT_UNSPECIFIED = 0; + + // Acquire shared locks. + // + // By default when you perform a read as part of a read-write transaction, + // Spanner acquires shared read locks, which allows other reads to still + // access the data until your transaction is ready to commit. 
When your + // transaction is committing and writes are being applied, the transaction + // attempts to upgrade to an exclusive lock for any data you are writing. + // For more information about locks, see [Lock + // modes](https://cloud.google.com/spanner/docs/introspection/lock-statistics#explain-lock-modes). + LOCK_HINT_SHARED = 1; + + // Acquire exclusive locks. + // + // Requesting exclusive locks is beneficial if you observe high write + // contention, which means you notice that multiple transactions are + // concurrently trying to read and write to the same data, resulting in a + // large number of aborts. This problem occurs when two transactions + // initially acquire shared locks and then both try to upgrade to exclusive + // locks at the same time. In this situation both transactions are waiting + // for the other to give up their lock, resulting in a deadlocked situation. + // Spanner is able to detect this occurring and force one of the + // transactions to abort. However, this is a slow and expensive operation + // and results in lower performance. In this case it makes sense to acquire + // exclusive locks at the start of the transaction because then when + // multiple transactions try to act on the same data, they automatically get + // serialized. Each transaction waits its turn to acquire the lock and + // avoids getting into deadlock situations. + // + // Because the exclusive lock hint is just a hint, it should not be + // considered equivalent to a mutex. In other words, you should not use + // Spanner exclusive locks as a mutual exclusion mechanism for the execution + // of code outside of Spanner. + // + // **Note:** Request exclusive locks judiciously because they block others + // from reading that data for the entire transaction, rather than just when + // the writes are being performed. 
Unless you observe high write contention, + // you should use the default of shared read locks so you don't prematurely + // block other clients from reading the data that you're writing to. + LOCK_HINT_EXCLUSIVE = 2; + } + // Required. The session in which the read should be performed. string session = 1 [ (google.api.field_behavior) = REQUIRED, @@ -1117,6 +1202,19 @@ message ReadRequest { // If the field is set to `true` but the request does not set // `partition_token`, the API returns an `INVALID_ARGUMENT` error. bool data_boost_enabled = 15; + + // Optional. Order for the returned rows. + // + // By default, Spanner will return result rows in primary key order except for + // PartitionRead requests. For applications that do not require rows to be + // returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting + // `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval, + // resulting in lower latencies in certain cases (e.g. bulk point lookups). + OrderBy order_by = 16 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Lock Hint for the request, it can only be used with read-write + // transactions. + LockHint lock_hint = 17 [(google.api.field_behavior) = OPTIONAL]; } // The request for @@ -1137,6 +1235,14 @@ message BeginTransactionRequest { // transaction, set it on the reads and writes that are part of this // transaction instead. RequestOptions request_options = 3; + + // Optional. Required for read-write transactions on a multiplexed session + // that commit mutations but do not perform any reads or queries. Clients + // should randomly select one of the mutations from the mutation set and send + // it as a part of this request. + // This feature is not yet supported and will result in an UNIMPLEMENTED + // error. + Mutation mutation_key = 4 [(google.api.field_behavior) = OPTIONAL]; } // The request for [Commit][google.spanner.v1.Spanner.Commit]. @@ -1184,6 +1290,15 @@ message CommitRequest { // Common options for this request. 
RequestOptions request_options = 6; + + // Optional. If the read-write transaction was executed on a multiplexed + // session, the precommit token with the highest sequence number received in + // this transaction attempt, should be included here. Failing to do so will + // result in a FailedPrecondition error. + // This feature is not yet supported and will result in an UNIMPLEMENTED + // error. + MultiplexedSessionPrecommitToken precommit_token = 9 + [(google.api.field_behavior) = OPTIONAL]; } // The request for [Rollback][google.spanner.v1.Spanner.Rollback]. diff --git a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/transaction.proto b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/transaction.proto index e3f22ee3c98..fe564538466 100644 --- a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/transaction.proto +++ b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/transaction.proto @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ syntax = "proto3"; package google.spanner.v1; +import "google/api/field_behavior.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; @@ -378,6 +379,14 @@ message TransactionOptions { // Read lock mode for the transaction. ReadLockMode read_lock_mode = 1; + + // Optional. Clients should pass the transaction ID of the previous + // transaction attempt that was aborted if this transaction is being + // executed on a multiplexed session. + // This feature is not yet supported and will result in an UNIMPLEMENTED + // error. + bytes multiplexed_session_previous_transaction_id = 2 + [(google.api.field_behavior) = OPTIONAL]; } // Message type to initiate a Partitioned DML transaction. 
@@ -512,6 +521,17 @@ message Transaction { // A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. // Example: `"2014-10-02T15:01:23.045123456Z"`. google.protobuf.Timestamp read_timestamp = 2; + + // A precommit token will be included in the response of a BeginTransaction + // request if the read-write transaction is on a multiplexed session and + // a mutation_key was specified in the + // [BeginTransaction][google.spanner.v1.BeginTransactionRequest]. + // The precommit token with the highest sequence number from this transaction + // attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit] + // request for this transaction. + // This feature is not yet supported and will result in an UNIMPLEMENTED + // error. + MultiplexedSessionPrecommitToken precommit_token = 3; } // This message is used to select the transaction in which a @@ -539,3 +559,17 @@ message TransactionSelector { TransactionOptions begin = 3; } } + +// When a read-write transaction is executed on a multiplexed session, +// this precommit token is sent back to the client +// as a part of the [Transaction] message in the BeginTransaction response and +// also as a part of the [ResultSet] and [PartialResultSet] responses. +message MultiplexedSessionPrecommitToken { + // Opaque precommit token. + bytes precommit_token = 1; + + // An incrementing seq number is generated on every precommit token + // that is returned. Clients should remember the precommit token with the + // highest sequence number from the current transaction attempt. 
+ int32 seq_num = 2; +} diff --git a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/type.proto b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/type.proto index 8e28fa7fd63..734cfb54cda 100644 --- a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/type.proto +++ b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/type.proto @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -32,20 +32,24 @@ message Type { // Required. The [TypeCode][google.spanner.v1.TypeCode] for this type. TypeCode code = 1 [(google.api.field_behavior) = REQUIRED]; - // If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` - // is the type of the array elements. + // If [code][google.spanner.v1.Type.code] == + // [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` is the + // type of the array elements. Type array_element_type = 2; - // If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` - // provides type information for the struct's fields. + // If [code][google.spanner.v1.Type.code] == + // [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` provides + // type information for the struct's fields. StructType struct_type = 3; - // The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that disambiguates SQL type that Spanner will - // use to represent values of this type during query processing. This is - // necessary for some type codes because a single [TypeCode][google.spanner.v1.TypeCode] can be mapped - // to different SQL types depending on the SQL dialect. 
[type_annotation][google.spanner.v1.Type.type_annotation] - // typically is not needed to process the content of a value (it doesn't - // affect serialization) and clients can ignore it on the read path. + // The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that + // disambiguates SQL type that Spanner will use to represent values of this + // type during query processing. This is necessary for some type codes because + // a single [TypeCode][google.spanner.v1.TypeCode] can be mapped to different + // SQL types depending on the SQL dialect. + // [type_annotation][google.spanner.v1.Type.type_annotation] typically is not + // needed to process the content of a value (it doesn't affect serialization) + // and clients can ignore it on the read path. TypeAnnotationCode type_annotation = 4; // If [code][google.spanner.v1.Type.code] == @@ -56,7 +60,8 @@ message Type { string proto_type_fqn = 5; } -// `StructType` defines the fields of a [STRUCT][google.spanner.v1.TypeCode.STRUCT] type. +// `StructType` defines the fields of a +// [STRUCT][google.spanner.v1.TypeCode.STRUCT] type. message StructType { // Message representing a single field of a struct. message Field { @@ -76,9 +81,9 @@ message StructType { // The list of fields that make up this struct. Order is // significant, because values of this struct type are represented as // lists, where the order of field values matches the order of - // fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - // matches the order of columns in a read request, or the order of - // fields in the `SELECT` clause of a query. + // fields in the [StructType][google.spanner.v1.StructType]. In turn, the + // order of fields matches the order of columns in a read request, or the + // order of fields in the `SELECT` clause of a query. repeated Field fields = 1; } @@ -137,14 +142,14 @@ enum TypeCode { STRUCT = 9; // Encoded as `string`, in decimal format or scientific notation format. - //
    Decimal format: - //
    `[+-]Digits[.[Digits]]` or - //
    `[+-][Digits].Digits` + // Decimal format: + // `[+-]Digits[.[Digits]]` or + // `[+-][Digits].Digits` // // Scientific notation: - //
    `[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]` or - //
    `[+-][Digits].Digits[ExponentIndicator[+-]Digits]` - //
    (ExponentIndicator is `"e"` or `"E"`) + // `[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]` or + // `[+-][Digits].Digits[ExponentIndicator[+-]Digits]` + // (ExponentIndicator is `"e"` or `"E"`) NUMERIC = 10; // Encoded as a JSON-formatted `string` as described in RFC 7159. The @@ -163,6 +168,13 @@ enum TypeCode { // Encoded as `string`, in decimal format. ENUM = 14; + + // Encoded as `string`, in `ISO8601` duration format - + // `P[n]Y[n]M[n]DT[n]H[n]M[n[.fraction]]S` + // where `n` is an integer. + // For example, `P1Y2M3DT4H5M6.5S` represents time duration of 1 year, 2 + // months, 3 days, 4 hours, 5 minutes, and 6.5 seconds. + INTERVAL = 16; } // `TypeAnnotationCode` is used as a part of [Type][google.spanner.v1.Type] to @@ -175,19 +187,20 @@ enum TypeAnnotationCode { TYPE_ANNOTATION_CODE_UNSPECIFIED = 0; // PostgreSQL compatible NUMERIC type. This annotation needs to be applied to - // [Type][google.spanner.v1.Type] instances having [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] - // type code to specify that values of this type should be treated as - // PostgreSQL NUMERIC values. Currently this annotation is always needed for - // [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] when a client interacts with PostgreSQL-enabled - // Spanner databases. + // [Type][google.spanner.v1.Type] instances having + // [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] type code to specify that + // values of this type should be treated as PostgreSQL NUMERIC values. + // Currently this annotation is always needed for + // [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] when a client interacts with + // PostgreSQL-enabled Spanner databases. PG_NUMERIC = 2; // PostgreSQL compatible JSONB type. This annotation needs to be applied to - // [Type][google.spanner.v1.Type] instances having [JSON][google.spanner.v1.TypeCode.JSON] - // type code to specify that values of this type should be treated as - // PostgreSQL JSONB values. 
Currently this annotation is always needed for - // [JSON][google.spanner.v1.TypeCode.JSON] when a client interacts with PostgreSQL-enabled - // Spanner databases. + // [Type][google.spanner.v1.Type] instances having + // [JSON][google.spanner.v1.TypeCode.JSON] type code to specify that values of + // this type should be treated as PostgreSQL JSONB values. Currently this + // annotation is always needed for [JSON][google.spanner.v1.TypeCode.JSON] + // when a client interacts with PostgreSQL-enabled Spanner databases. PG_JSONB = 3; // PostgreSQL compatible OID type. This annotation can be used by a client diff --git a/renovate.json b/renovate.json index 33c03a11c6a..167bf279fe7 100644 --- a/renovate.json +++ b/renovate.json @@ -1,4 +1,6 @@ + { + "extends": [ ":separateMajorReleases", ":combinePatchMinorReleases", @@ -23,7 +25,9 @@ "fileMatch": [ "^.kokoro/presubmit/graalvm-native.*.cfg$" ], - "matchStrings": ["value: \"gcr.io/cloud-devrel-public-resources/graalvm.*:(?.*?)\""], + "matchStrings": [ + "value: \"gcr.io/cloud-devrel-public-resources/graalvm.*:(?.*?)\"" + ], "depNameTemplate": "com.google.cloud:sdk-platform-java-config", "datasourceTemplate": "maven" }, @@ -32,9 +36,21 @@ "fileMatch": [ "^.github/workflows/unmanaged_dependency_check.yaml$" ], - "matchStrings": ["uses: googleapis/sdk-platform-java/java-shared-dependencies/unmanaged-dependency-check@google-cloud-shared-dependencies/v(?.+?)\\n"], + "matchStrings": [ + "uses: googleapis/sdk-platform-java/java-shared-dependencies/unmanaged-dependency-check@google-cloud-shared-dependencies/v(?.+?)\\n" + ], "depNameTemplate": "com.google.cloud:sdk-platform-java-config", "datasourceTemplate": "maven" + }, + { + "fileMatch": [ + "^.github/workflows/hermetic_library_generation.yaml$" + ], + "matchStrings": [ + "uses: googleapis/sdk-platform-java/.github/scripts@v(?.+?)\\n" + ], + "depNameTemplate": "com.google.api:gapic-generator-java", + "datasourceTemplate": "maven" } ], "packageRules": [ @@ -95,8 +111,15 @@ 
"^com.fasterxml.jackson.core" ], "groupName": "jackson dependencies" + }, + { + "matchPackagePatterns": [ + "^com.google.api:gapic-generator-java", + "^com.google.cloud:sdk-platform-java-config" + ], + "groupName": "SDK platform Java dependencies" } ], "semanticCommits": true, "dependencyDashboard": true -} +} \ No newline at end of file diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml index bc846dfdc77..d2a76cc73d3 100644 --- a/samples/install-without-bom/pom.xml +++ b/samples/install-without-bom/pom.xml @@ -23,8 +23,8 @@ 1.8 UTF-8 0.31.1 - 2.41.0 - 3.43.0 + 2.52.0 + 3.53.0 @@ -33,7 +33,7 @@ com.google.cloud google-cloud-spanner - 6.65.1 + 6.76.0 @@ -100,18 +100,23 @@ com.google.truth truth - 1.4.2 + 1.4.4 test + + + ../snippets/src/main/resources + + org.codehaus.mojo build-helper-maven-plugin - 3.5.0 + 3.6.0 add-snippets-source @@ -140,11 +145,11 @@ org.apache.maven.plugins maven-failsafe-plugin - 3.2.5 + 3.5.0 - java-client-integration-test - java-client-mr-integration-test + java-client-integration-tests + java-client-mr-integration-tests nam11 us-east1 java-client-integration-test-cmek-ring diff --git a/samples/native-image/README.md b/samples/native-image/README.md deleted file mode 100644 index 7e02ac77c9d..00000000000 --- a/samples/native-image/README.md +++ /dev/null @@ -1,92 +0,0 @@ -# Spanner Sample Application with Native Image - -This is a sample application which uses the Cloud Spanner client libraries and demonstrates compatibility with Native Image compilation. - -The application creates a new Spanner instance and database, and it runs basic operations including queries and Spanner mutations. - -## Setup Instructions - -You will need to follow these prerequisite steps in order to run these samples: - -1. If you have not already, [create a Google Cloud Platform Project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#creating_a_project). - -2. 
Install the [Google Cloud SDK](https://cloud.google.com/sdk/) which will allow you to run the sample with your project's credentials. - - Once installed, log in with Application Default Credentials using the following command: - - ``` - gcloud auth application-default login - ``` - - **Note:** Authenticating with Application Default Credentials is convenient to use during development, but we recommend [alternate methods of authentication](https://cloud.google.com/docs/authentication/production) during production use. - -3. Install the GraalVM compiler. - - You can follow the [official installation instructions](https://www.graalvm.org/docs/getting-started-with-graalvm/#install-graalvm) from the GraalVM website. - After following the instructions, ensure that you install the Native Image extension installed by running: - - ``` - gu install native-image - ``` - - Once you finish following the instructions, verify that the default version of Java is set to the GraalVM version by running `java -version` in a terminal. - - You will see something similar to the below output: - - ``` - $ java -version - - openjdk version "11.0.15" 2022-04-19 - OpenJDK Runtime Environment GraalVM CE 22.1.0 (build 11.0.15+10-jvmci-22.1-b06) - OpenJDK 64-Bit Server VM GraalVM CE 22.1.0 (build 11.0.15+10-jvmci-22.1-b06, mixed mode, sharing) - - ``` -## Run with Native Image Compilation - -1. **(Optional)** If you wish to run the application against the [Spanner emulator](https://cloud.google.com/spanner/docs/emulator), make sure that you have the [Google Cloud SDK](https://cloud.google.com/sdk) installed. - - In a new terminal window, start the emulator via `gcloud`: - - ``` - gcloud beta emulators spanner start - ``` - - You may leave the emulator running for now. - In the next section, we will run the sample application against the Spanner emulator instsance. - -2. Navigate to this directory and compile the application with the Native Image compiler. 
- - ``` - mvn package -P native -DskipTests - ``` - -3. **(Optional)** If you're using the emulator, export the `SPANNER_EMULATOR_HOST` as an environment variable in your terminal. - - ``` - export SPANNER_EMULATOR_HOST=localhost:9010 - ``` - - The Spanner Client Libraries will detect this environment variable and will automatically connect to the emulator instance if this variable is set. - -4. Run the application. - - ``` - ./target/native-image - ``` - -5. The application will run through some basic Spanner operations and log some output statements. - - ``` - Running the Spanner Sample. - Singers Registered in Spanner: - Bob Loblaw - Virginia Watson - ``` - -## Sample Integration test with Native Image Support - -In order to run the sample integration test as a native image, call the following command: - - ``` - mvn test -Pnative - ``` diff --git a/samples/native-image/pom.xml b/samples/native-image/pom.xml deleted file mode 100644 index 8cf8cc752e1..00000000000 --- a/samples/native-image/pom.xml +++ /dev/null @@ -1,165 +0,0 @@ - - - 4.0.0 - com.google.cloud - native-image - Native Image Sample - https://github.com/googleapis/java-spanner - - - - com.google.cloud.samples - shared-configuration - 1.2.0 - - - - - - 1.8 - 1.8 - UTF-8 - - - - - - com.google.cloud - libraries-bom - 26.37.0 - pom - import - - - - - - - com.google.cloud - google-cloud-spanner - - - - junit - junit - 4.13.2 - test - - - com.google.truth - truth - 1.4.2 - test - - - - - - - - org.apache.maven.plugins - maven-jar-plugin - 3.4.1 - - - - true - dependency-jars/ - com.example.spanner.NativeImageSpannerSample - - - - - - org.apache.maven.plugins - maven-dependency-plugin - 3.6.1 - - - copy-dependencies - package - - copy-dependencies - - - - ${project.build.directory}/dependency-jars/ - - - - - - - - - - - - native - - - - org.junit.vintage - junit-vintage-engine - 5.10.2 - test - - - org.graalvm.buildtools - junit-platform-native - 0.10.1 - test - - - - - - - org.apache.maven.plugins - 
maven-surefire-plugin - - 3.2.5 - - - **/*IT - - - - - org.graalvm.buildtools - native-maven-plugin - 0.10.1 - true - - com.example.spanner.NativeImageSpannerSample - - --no-fallback - --no-server - - - - - build-native - - build - test - - package - - - test-native - - test - - test - - - - - - - - \ No newline at end of file diff --git a/samples/native-image/src/main/java/com/example/spanner/DatabaseOperations.java b/samples/native-image/src/main/java/com/example/spanner/DatabaseOperations.java deleted file mode 100644 index 829556c6748..00000000000 --- a/samples/native-image/src/main/java/com/example/spanner/DatabaseOperations.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.example.spanner; - -import com.google.cloud.spanner.Database; -import com.google.cloud.spanner.DatabaseAdminClient; -import com.google.cloud.spanner.DatabaseClient; -import com.google.cloud.spanner.KeySet; -import com.google.cloud.spanner.Mutation; -import com.google.cloud.spanner.ResultSet; -import com.google.cloud.spanner.Statement; -import com.google.common.collect.ImmutableList; -import java.util.Collections; -import java.util.List; - -/** Helper methods to manage Spanner Databases. 
*/ -public class DatabaseOperations { - - private static final List DDL_STATEMENTS = - ImmutableList.of( - "CREATE TABLE Singers (SingerId INT64 NOT NULL, FirstName " - + "STRING(1024), LastName STRING(1024)) PRIMARY KEY (SingerId)"); - - static void createDatabase( - DatabaseAdminClient databaseAdminClient, String instanceId, String databaseId) { - - if (databaseExists(databaseAdminClient, instanceId, databaseId)) { - databaseAdminClient.dropDatabase(instanceId, databaseId); - } - databaseAdminClient.createDatabase(instanceId, databaseId, DDL_STATEMENTS); - } - - static boolean databaseExists( - DatabaseAdminClient databaseAdminClient, String instanceId, String databaseId) { - - for (Database database : databaseAdminClient.listDatabases(instanceId).iterateAll()) { - if (databaseId.equals(database.getId().getDatabase())) { - return true; - } - } - return false; - } - - static void insertUsingDml(DatabaseClient dbClient) { - dbClient - .readWriteTransaction() - .run( - transaction -> { - String sql = - "INSERT INTO Singers (SingerId, FirstName, LastName) " - + " VALUES (10, 'Virginia', 'Watson')"; - transaction.executeUpdate(Statement.of(sql)); - return null; - }); - } - - static void insertUsingMutation(DatabaseClient dbClient) { - Mutation mutation = - Mutation.newInsertBuilder("Singers") - .set("SingerId") - .to(12) - .set("FirstName") - .to("Bob") - .set("LastName") - .to("Loblaw") - .build(); - dbClient.write(Collections.singletonList(mutation)); - } - - static ResultSet performRead(DatabaseClient dbClient) { - return dbClient.singleUse().executeQuery(Statement.of("SELECT * FROM Singers")); - } - - static void deleteDatabase(DatabaseClient dbClient) { - dbClient.write(Collections.singletonList(Mutation.delete("Singers", KeySet.all()))); - System.out.println("Records deleted."); - } -} diff --git a/samples/native-image/src/main/java/com/example/spanner/InstanceOperations.java b/samples/native-image/src/main/java/com/example/spanner/InstanceOperations.java 
deleted file mode 100644 index 75efd6b45b1..00000000000 --- a/samples/native-image/src/main/java/com/example/spanner/InstanceOperations.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.example.spanner; - -import com.google.cloud.spanner.Instance; -import com.google.cloud.spanner.InstanceAdminClient; -import com.google.cloud.spanner.InstanceConfigId; -import com.google.cloud.spanner.InstanceId; -import com.google.cloud.spanner.InstanceInfo; - -/** Helper methods to manage Spanner instances. 
*/ -public class InstanceOperations { - - static void createTestInstance( - InstanceAdminClient instanceAdminClient, String projectId, String instanceId) - throws Exception { - - if (instanceExists(instanceAdminClient, instanceId)) { - instanceAdminClient.deleteInstance(instanceId); - } - - InstanceInfo instanceInfo = - InstanceInfo.newBuilder(InstanceId.of(projectId, instanceId)) - .setInstanceConfigId(InstanceConfigId.of(projectId, "regional-us-central1")) - .setNodeCount(1) - .setDisplayName(instanceId) - .build(); - try { - instanceAdminClient.createInstance(instanceInfo).get(); - } catch (Exception e) { - throw new Exception("Failed to create Spanner instance.", e); - } - } - - static boolean instanceExists(InstanceAdminClient instanceAdminClient, String instanceName) { - for (Instance instance : instanceAdminClient.listInstances().iterateAll()) { - if (instanceName.equals(instance.getId().getInstance())) { - return true; - } - } - return false; - } -} diff --git a/samples/native-image/src/main/java/com/example/spanner/NativeImageSpannerSample.java b/samples/native-image/src/main/java/com/example/spanner/NativeImageSpannerSample.java deleted file mode 100644 index b71404daf07..00000000000 --- a/samples/native-image/src/main/java/com/example/spanner/NativeImageSpannerSample.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.example.spanner; - -import com.google.cloud.spanner.DatabaseAdminClient; -import com.google.cloud.spanner.DatabaseClient; -import com.google.cloud.spanner.DatabaseId; -import com.google.cloud.spanner.InstanceAdminClient; -import com.google.cloud.spanner.ResultSet; -import com.google.cloud.spanner.Spanner; -import com.google.cloud.spanner.SpannerOptions; - -/** Sample Spanner application compiled with Native Image. */ -public class NativeImageSpannerSample { - - private static final String TEST_INSTANCE_ID = "test-instance"; - private static final String TEST_DATABASE_ID = "test-database"; - - /** - * Runs the Spanner sample application. - * - *

    This application should be run with the Spanner emulator for testing purposes. - */ - public static void main(String[] args) throws Exception { - System.out.println("Running the Spanner Sample."); - - SpannerOptions options = SpannerOptions.newBuilder().build(); - Spanner spanner = options.getService(); - - // Setup the Spanner environment. - InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); - DatabaseAdminClient databaseAdminClient = spanner.getDatabaseAdminClient(); - - InstanceOperations.createTestInstance( - instanceAdminClient, options.getProjectId(), TEST_INSTANCE_ID); - DatabaseOperations.createDatabase(databaseAdminClient, TEST_INSTANCE_ID, TEST_DATABASE_ID); - - // Insert data - DatabaseClient dbClient = - spanner.getDatabaseClient( - DatabaseId.of(options.getProjectId(), TEST_INSTANCE_ID, TEST_DATABASE_ID)); - DatabaseOperations.insertUsingDml(dbClient); - DatabaseOperations.insertUsingMutation(dbClient); - - // Run some queries. - ResultSet resultSet = DatabaseOperations.performRead(dbClient); - System.out.println("Singers Registered in Spanner:"); - while (resultSet.next()) { - System.out.println(resultSet.getString("FirstName") + " " + resultSet.getString("LastName")); - } - if (DatabaseOperations.databaseExists( - databaseAdminClient, TEST_INSTANCE_ID, TEST_DATABASE_ID)) { - DatabaseOperations.deleteDatabase(dbClient); - } - } -} diff --git a/samples/native-image/src/test/java/com/example/spanner/NativeImageSpannerSampleIT.java b/samples/native-image/src/test/java/com/example/spanner/NativeImageSpannerSampleIT.java deleted file mode 100644 index ce47de92733..00000000000 --- a/samples/native-image/src/test/java/com/example/spanner/NativeImageSpannerSampleIT.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.example.spanner; - -import static com.google.common.truth.Truth.assertThat; - -import com.google.cloud.testing.junit4.StdOutCaptureRule; -import org.junit.Rule; -import org.junit.Test; - -public class NativeImageSpannerSampleIT { - - @Rule public StdOutCaptureRule stdOut = new StdOutCaptureRule(); - - @Test - public void testStoreAndRead() throws Exception { - NativeImageSpannerSample.main(new String[] {}); - assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("Singers Registered in Spanner:"); - assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("Virginia Watson"); - assertThat(stdOut.getCapturedOutputAsUtf8String()).contains("Bob Loblaw"); - } -} diff --git a/samples/pom.xml b/samples/pom.xml index 07b17a608cd..7f027400da9 100644 --- a/samples/pom.xml +++ b/samples/pom.xml @@ -32,7 +32,6 @@ install-without-bom snapshot snippets - native-image @@ -40,7 +39,7 @@ org.apache.maven.plugins maven-deploy-plugin - 3.1.1 + 3.1.3 true @@ -48,7 +47,7 @@ org.sonatype.plugins nexus-staging-maven-plugin - 1.6.13 + 1.7.0 true diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml index edb577d2f8a..a46e149d0a1 100644 --- a/samples/snapshot/pom.xml +++ b/samples/snapshot/pom.xml @@ -23,8 +23,8 @@ 1.8 UTF-8 0.31.1 - 2.41.0 - 3.43.0 + 2.52.0 + 3.53.0 @@ -32,7 +32,7 @@ com.google.cloud google-cloud-spanner - 6.66.1-SNAPSHOT + 6.78.1-SNAPSHOT @@ -99,18 +99,23 @@ com.google.truth truth - 1.4.2 + 1.4.4 test + + + ../snippets/src/main/resources + + org.codehaus.mojo build-helper-maven-plugin - 3.5.0 + 3.6.0 
add-snippets-source @@ -139,11 +144,11 @@ org.apache.maven.plugins maven-failsafe-plugin - 3.2.5 + 3.5.0 - java-client-integration-test - java-client-mr-integration-test + java-client-integration-tests + java-client-mr-integration-tests nam11 us-east1 java-client-integration-test-cmek-ring diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml index f1f1c3be01f..90efc8b5178 100644 --- a/samples/snippets/pom.xml +++ b/samples/snippets/pom.xml @@ -34,7 +34,7 @@ com.google.cloud libraries-bom - 26.38.0 + 26.48.0 pom import @@ -111,7 +111,7 @@ com.google.truth truth - 1.4.2 + 1.4.4 test @@ -175,11 +175,11 @@ org.apache.maven.plugins maven-failsafe-plugin - 3.2.5 + 3.5.0 - java-client-integration-test - java-client-mr-integration-test + java-client-integration-tests + java-client-mr-integration-tests nam11 us-east1 java-client-integration-test-cmek-ring @@ -189,6 +189,15 @@ + + org.apache.maven.plugins + maven-checkstyle-plugin + 3.5.0 + + + **/SingerProto.java + + diff --git a/samples/snippets/src/main/java/com/example/spanner/AddProtoColumnSample.java b/samples/snippets/src/main/java/com/example/spanner/AddProtoColumnSample.java new file mode 100644 index 00000000000..36be70034f7 --- /dev/null +++ b/samples/snippets/src/main/java/com/example/spanner/AddProtoColumnSample.java @@ -0,0 +1,76 @@ +/* + * Copyright 2024 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_add_proto_type_columns] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.ByteString; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest; +import java.io.IOException; +import java.io.InputStream; +import java.util.concurrent.ExecutionException; + +class AddProtoColumnSample { + + static void addProtoColumn() throws InterruptedException, ExecutionException, IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + addProtoColumn(projectId, instanceId, databaseId); + } + + static void addProtoColumn(String projectId, String instanceId, String databaseId) + throws InterruptedException, ExecutionException, IOException { + InputStream in = + AddProtoColumnSample.class + .getClassLoader() + .getResourceAsStream("com/example/spanner/descriptors.pb"); + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.createDatabaseAdminClient()) { + UpdateDatabaseDdlRequest request = + UpdateDatabaseDdlRequest.newBuilder() + .setDatabase(DatabaseName.of(projectId, instanceId, databaseId).toString()) + .addAllStatements( + ImmutableList.of( + "CREATE PROTO BUNDLE (" + + "examples.spanner.music.SingerInfo," + + "examples.spanner.music.Genre," + + ")", + "ALTER TABLE Singers ADD COLUMN SingerInfo examples.spanner.music.SingerInfo", + "ALTER TABLE Singers ADD COLUMN " + + "SingerInfoArray ARRAY", + "ALTER TABLE Singers ADD COLUMN SingerGenre examples.spanner.music.Genre", + "ALTER TABLE Singers ADD COLUMN " + + "SingerGenreArray 
ARRAY")) + .setProtoDescriptors(ByteString.readFrom(in)) + .build(); + // Wait for the operation to finish. + // This will throw an ExecutionException if the operation fails. + databaseAdminClient.updateDatabaseDdlAsync(request).get(); + System.out.printf("Added Proto columns %n"); + } + } +} +// [END spanner_add_proto_type_columns] diff --git a/samples/snippets/src/main/java/com/example/spanner/ChangeStreamsTxnExclusionSample.java b/samples/snippets/src/main/java/com/example/spanner/ChangeStreamsTxnExclusionSample.java index 309a064f4ff..9ee0155ae5b 100644 --- a/samples/snippets/src/main/java/com/example/spanner/ChangeStreamsTxnExclusionSample.java +++ b/samples/snippets/src/main/java/com/example/spanner/ChangeStreamsTxnExclusionSample.java @@ -1,5 +1,5 @@ /* - * Copyright 2021 Google LLC + * Copyright 2024 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/samples/snippets/src/main/java/com/example/spanner/CreateFullBackupScheduleSample.java b/samples/snippets/src/main/java/com/example/spanner/CreateFullBackupScheduleSample.java new file mode 100644 index 00000000000..b3836092692 --- /dev/null +++ b/samples/snippets/src/main/java/com/example/spanner/CreateFullBackupScheduleSample.java @@ -0,0 +1,77 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_create_full_backup_schedule] + +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.protobuf.Duration; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.BackupScheduleSpec; +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.CrontabSpec; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.FullBackupSpec; +import java.io.IOException; + +class CreateFullBackupScheduleSample { + + static void createFullBackupSchedule() throws IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupScheduleId = "my-backup-schedule"; + createFullBackupSchedule(projectId, instanceId, databaseId, backupScheduleId); + } + + static void createFullBackupSchedule( + String projectId, String instanceId, String databaseId, String backupScheduleId) + throws IOException { + final CreateBackupEncryptionConfig encryptionConfig = + CreateBackupEncryptionConfig.newBuilder() + .setEncryptionType(CreateBackupEncryptionConfig.EncryptionType.USE_DATABASE_ENCRYPTION) + .build(); + final BackupSchedule backupSchedule = + BackupSchedule.newBuilder() + .setFullBackupSpec(FullBackupSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().setSeconds(3600 * 24).build()) + .setSpec( + BackupScheduleSpec.newBuilder() + .setCronSpec(CrontabSpec.newBuilder().setText("30 12 * * *").build()) + .build()) + .setEncryptionConfig(encryptionConfig) + .build(); + + try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) { + DatabaseName databaseName = DatabaseName.of(projectId, instanceId, 
databaseId); + final BackupSchedule createdBackupSchedule = + databaseAdminClient.createBackupSchedule( + CreateBackupScheduleRequest.newBuilder() + .setParent(databaseName.toString()) + .setBackupScheduleId(backupScheduleId) + .setBackupSchedule(backupSchedule) + .build()); + System.out.println( + String.format( + "Created backup schedule: %s\n%s", + createdBackupSchedule.getName(), createdBackupSchedule.toString())); + } + } +} +// [END spanner_create_full_backup_schedule] diff --git a/samples/snippets/src/main/java/com/example/spanner/CreateIncrementalBackupScheduleSample.java b/samples/snippets/src/main/java/com/example/spanner/CreateIncrementalBackupScheduleSample.java new file mode 100644 index 00000000000..f73ebd30f23 --- /dev/null +++ b/samples/snippets/src/main/java/com/example/spanner/CreateIncrementalBackupScheduleSample.java @@ -0,0 +1,78 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_create_incremental_backup_schedule] + +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.protobuf.Duration; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.BackupScheduleSpec; +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig; +import com.google.spanner.admin.database.v1.CreateBackupScheduleRequest; +import com.google.spanner.admin.database.v1.CrontabSpec; +import com.google.spanner.admin.database.v1.DatabaseName; +import com.google.spanner.admin.database.v1.IncrementalBackupSpec; +import java.io.IOException; + +class CreateIncrementalBackupScheduleSample { + + static void createIncrementalBackupSchedule() throws IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupScheduleId = "my-backup-schedule"; + createIncrementalBackupSchedule(projectId, instanceId, databaseId, backupScheduleId); + } + + static void createIncrementalBackupSchedule( + String projectId, String instanceId, String databaseId, String backupScheduleId) + throws IOException { + final CreateBackupEncryptionConfig encryptionConfig = + CreateBackupEncryptionConfig.newBuilder() + .setEncryptionType( + CreateBackupEncryptionConfig.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION) + .build(); + final BackupSchedule backupSchedule = + BackupSchedule.newBuilder() + .setIncrementalBackupSpec(IncrementalBackupSpec.newBuilder().build()) + .setRetentionDuration(Duration.newBuilder().setSeconds(3600 * 24).build()) + .setSpec( + BackupScheduleSpec.newBuilder() + .setCronSpec(CrontabSpec.newBuilder().setText("30 12 * * *").build()) + .build()) + .setEncryptionConfig(encryptionConfig) + .build(); + + try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) { + 
DatabaseName databaseName = DatabaseName.of(projectId, instanceId, databaseId); + final BackupSchedule createdBackupSchedule = + databaseAdminClient.createBackupSchedule( + CreateBackupScheduleRequest.newBuilder() + .setParent(databaseName.toString()) + .setBackupScheduleId(backupScheduleId) + .setBackupSchedule(backupSchedule) + .build()); + System.out.println( + String.format( + "Created incremental backup schedule: %s\n%s", + createdBackupSchedule.getName(), createdBackupSchedule.toString())); + } + } +} +// [END spanner_create_incremental_backup_schedule] diff --git a/samples/snippets/src/main/java/com/example/spanner/CreateInstanceExample.java b/samples/snippets/src/main/java/com/example/spanner/CreateInstanceExample.java index b53727ba6d5..c5efd3956fa 100644 --- a/samples/snippets/src/main/java/com/example/spanner/CreateInstanceExample.java +++ b/samples/snippets/src/main/java/com/example/spanner/CreateInstanceExample.java @@ -16,7 +16,7 @@ package com.example.spanner; -//[START spanner_create_instance] +// [START spanner_create_instance] import com.google.cloud.spanner.Spanner; import com.google.cloud.spanner.SpannerOptions; @@ -45,25 +45,25 @@ static void createInstance(String projectId, String instanceId) { Instance instance = Instance.newBuilder() .setDisplayName(displayName) + .setEdition(Instance.Edition.STANDARD) .setNodeCount(nodeCount) - .setConfig( - InstanceConfigName.of(projectId, "regional-us-central1").toString()) + .setConfig(InstanceConfigName.of(projectId, "regional-us-east4").toString()) .build(); - + try (Spanner spanner = - SpannerOptions.newBuilder() - .setProjectId(projectId) - .build() - .getService(); + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { // Wait for the createInstance operation to finish. 
- Instance createdInstance = instanceAdminClient.createInstanceAsync( - CreateInstanceRequest.newBuilder() - .setParent(ProjectName.of(projectId).toString()) - .setInstanceId(instanceId) - .setInstance(instance) - .build()).get(); + Instance createdInstance = + instanceAdminClient + .createInstanceAsync( + CreateInstanceRequest.newBuilder() + .setParent(ProjectName.of(projectId).toString()) + .setInstanceId(instanceId) + .setInstance(instance) + .build()) + .get(); System.out.printf("Instance %s was successfully created%n", createdInstance.getName()); } catch (ExecutionException e) { System.out.printf( @@ -74,4 +74,4 @@ static void createInstance(String projectId, String instanceId) { } } } -//[END spanner_create_instance] \ No newline at end of file +// [END spanner_create_instance] diff --git a/samples/snippets/src/main/java/com/example/spanner/CreateInstancePartitionSample.java b/samples/snippets/src/main/java/com/example/spanner/CreateInstancePartitionSample.java new file mode 100644 index 00000000000..0e547bdaf7e --- /dev/null +++ b/samples/snippets/src/main/java/com/example/spanner/CreateInstancePartitionSample.java @@ -0,0 +1,80 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_create_instance_partition] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.spanner.admin.instance.v1.CreateInstancePartitionRequest; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.InstanceName; +import com.google.spanner.admin.instance.v1.InstancePartition; +import java.util.concurrent.ExecutionException; + +class CreateInstancePartitionSample { + + static void createInstancePartition() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String instancePartitionId = "my-instance-partition"; + createInstancePartition(projectId, instanceId, instancePartitionId); + } + + static void createInstancePartition( + String projectId, String instanceId, String instancePartitionId) { + // Set instance partition configuration. + int nodeCount = 1; + String displayName = "Descriptive name"; + + // Create an InstancePartition object that will be used to create the instance partition. + InstancePartition instancePartition = + InstancePartition.newBuilder() + .setDisplayName(displayName) + .setNodeCount(nodeCount) + .setConfig(InstanceConfigName.of(projectId, "nam3").toString()) + .build(); + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + + // Wait for the createInstancePartition operation to finish. 
+ InstancePartition createdInstancePartition = + instanceAdminClient + .createInstancePartitionAsync( + CreateInstancePartitionRequest.newBuilder() + .setParent(InstanceName.of(projectId, instanceId).toString()) + .setInstancePartitionId(instancePartitionId) + .setInstancePartition(instancePartition) + .build()) + .get(); + System.out.printf( + "Instance partition %s was successfully created%n", createdInstancePartition.getName()); + } catch (ExecutionException e) { + System.out.printf( + "Error: Creating instance partition %s failed with error message %s%n", + instancePartition.getName(), e.getMessage()); + } catch (InterruptedException e) { + System.out.println( + "Error: Waiting for createInstancePartition operation to finish was interrupted"); + } + } +} +// [END spanner_create_instance_partition] diff --git a/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAutoscalingConfigExample.java b/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAutoscalingConfigExample.java index dc62dd7a684..0a6e21ea620 100644 --- a/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAutoscalingConfigExample.java +++ b/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithAutoscalingConfigExample.java @@ -45,7 +45,7 @@ static void createInstance(String projectId, String instanceId) { .getService(); InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { // Set Instance configuration. - String configId = "regional-us-central1"; + String configId = "regional-us-east4"; String displayName = "Descriptive name"; // Create an autoscaling config. 
diff --git a/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithProcessingUnitsExample.java b/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithProcessingUnitsExample.java index 293c10249c5..51133194744 100644 --- a/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithProcessingUnitsExample.java +++ b/samples/snippets/src/main/java/com/example/spanner/CreateInstanceWithProcessingUnitsExample.java @@ -44,7 +44,7 @@ static void createInstance(String projectId, String instanceId) { InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { // Set Instance configuration. - String configId = "regional-us-central1"; + String configId = "regional-us-east4"; // This will create an instance with the processing power of 0.2 nodes. int processingUnits = 500; String displayName = "Descriptive name"; diff --git a/samples/snippets/src/main/java/com/example/spanner/DeleteBackupScheduleSample.java b/samples/snippets/src/main/java/com/example/spanner/DeleteBackupScheduleSample.java new file mode 100644 index 00000000000..e87a1fcb66e --- /dev/null +++ b/samples/snippets/src/main/java/com/example/spanner/DeleteBackupScheduleSample.java @@ -0,0 +1,50 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_delete_backup_schedule] + +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.spanner.admin.database.v1.BackupScheduleName; +import com.google.spanner.admin.database.v1.DeleteBackupScheduleRequest; +import java.io.IOException; + +class DeleteBackupScheduleSample { + + static void deleteBackupSchedule() throws IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupScheduleId = "my-backup-schedule"; + deleteBackupSchedule(projectId, instanceId, databaseId, backupScheduleId); + } + + static void deleteBackupSchedule( + String projectId, String instanceId, String databaseId, String backupScheduleId) + throws IOException { + try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) { + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId); + databaseAdminClient.deleteBackupSchedule( + DeleteBackupScheduleRequest.newBuilder().setName(backupScheduleName.toString()).build()); + System.out.println( + String.format("Deleted backup schedule: %s", backupScheduleName.toString())); + } + } +} +// [END spanner_delete_backup_schedule] diff --git a/samples/snippets/src/main/java/com/example/spanner/GetBackupScheduleSample.java b/samples/snippets/src/main/java/com/example/spanner/GetBackupScheduleSample.java new file mode 100644 index 00000000000..3cd7e21f9b1 --- /dev/null +++ b/samples/snippets/src/main/java/com/example/spanner/GetBackupScheduleSample.java @@ -0,0 +1,53 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_get_backup_schedule] + +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.BackupScheduleName; +import com.google.spanner.admin.database.v1.GetBackupScheduleRequest; +import java.io.IOException; + +class GetBackupScheduleSample { + + static void getBackupSchedule() throws IOException { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupScheduleId = "my-backup-schedule"; + getBackupSchedule(projectId, instanceId, databaseId, backupScheduleId); + } + + static void getBackupSchedule( + String projectId, String instanceId, String databaseId, String backupScheduleId) + throws IOException { + try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) { + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId); + final BackupSchedule backupSchedule = + databaseAdminClient.getBackupSchedule( + GetBackupScheduleRequest.newBuilder().setName(backupScheduleName.toString()).build()); + System.out.println( + String.format( + "Backup schedule: %s\n%s", backupSchedule.getName(), backupSchedule.toString())); + } + } +} +// [END spanner_get_backup_schedule] diff --git a/samples/snippets/src/main/java/com/example/spanner/ListBackupSchedulesSample.java 
b/samples/snippets/src/main/java/com/example/spanner/ListBackupSchedulesSample.java new file mode 100644 index 00000000000..fba708937c0 --- /dev/null +++ b/samples/snippets/src/main/java/com/example/spanner/ListBackupSchedulesSample.java @@ -0,0 +1,52 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_list_backup_schedules] + +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.DatabaseName; +import java.io.IOException; + +class ListBackupSchedulesSample { + + static void listBackupSchedules() throws IOException { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + listBackupSchedules(projectId, instanceId, databaseId); + } + + static void listBackupSchedules(String projectId, String instanceId, String databaseId) + throws IOException { + try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) { + DatabaseName databaseName = DatabaseName.of(projectId, instanceId, databaseId); + + System.out.println( + String.format("Backup schedules for database '%s'", databaseName.toString())); + for (BackupSchedule backupSchedule : + databaseAdminClient.listBackupSchedules(databaseName).iterateAll()) { + System.out.println( + String.format( + "Backup schedule: %s\n%s", backupSchedule.getName(), backupSchedule.toString())); + } + } + } +} +// [END spanner_list_backup_schedules] diff --git a/samples/snippets/src/main/java/com/example/spanner/QueryWithProtoParameterSample.java b/samples/snippets/src/main/java/com/example/spanner/QueryWithProtoParameterSample.java new file mode 100644 index 00000000000..b700fa341fa --- /dev/null +++ b/samples/snippets/src/main/java/com/example/spanner/QueryWithProtoParameterSample.java @@ -0,0 +1,70 @@ +/* + * Copyright 2024 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +// [START spanner_query_with_proto_types_parameter] +import com.example.spanner.SingerProto.Genre; +import com.example.spanner.SingerProto.SingerInfo; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; + +class QueryWithProtoParameterSample { + + static void queryWithProtoParameter() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + queryWithProtoParameter(client); + } + } + + static void queryWithProtoParameter(DatabaseClient client) { + Statement statement = + Statement.newBuilder( + "SELECT SingerId, SingerInfo, SingerInfo.nationality, SingerInfoArray, " + + "SingerGenre, SingerGenreArray FROM Singers " + + "WHERE SingerInfo.nationality=@country and SingerGenre=@singerGenre") + .bind("country") + .to("Country2") + .bind("singerGenre") + .to(Genre.FOLK) + .build(); + try (ResultSet resultSet = client.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s %s %s %s%n", + resultSet.getLong("SingerId"), + resultSet.getProtoMessage("SingerInfo", SingerInfo.getDefaultInstance()), + resultSet.getString("nationality"), + resultSet.getProtoMessageList("SingerInfoArray", SingerInfo.getDefaultInstance()), + resultSet.getProtoEnum("SingerGenre", Genre::forNumber), + resultSet.getProtoEnumList("SingerGenreArray", Genre::forNumber)); + } + } + } +} +// [END spanner_query_with_proto_types_parameter] diff --git 
a/samples/snippets/src/main/java/com/example/spanner/SingerProto.java b/samples/snippets/src/main/java/com/example/spanner/SingerProto.java new file mode 100644 index 00000000000..b962e4bc6b7 --- /dev/null +++ b/samples/snippets/src/main/java/com/example/spanner/SingerProto.java @@ -0,0 +1,1191 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: samples/snippets/src/main/resources/com/example/spanner/singer.proto + +// Protobuf Java Version: 3.25.1 +package com.example.spanner; + +public final class SingerProto { + private SingerProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + /** Protobuf enum {@code examples.spanner.music.Genre} */ + public enum Genre implements com.google.protobuf.ProtocolMessageEnum { + /** POP = 0; */ + POP(0), + /** JAZZ = 1; */ + JAZZ(1), + /** FOLK = 2; */ + FOLK(2), + /** ROCK = 3; */ + ROCK(3), + UNRECOGNIZED(-1), + ; + + /** POP = 0; */ + public static final int POP_VALUE = 0; + /** JAZZ = 1; */ + public static final int JAZZ_VALUE = 1; + /** FOLK = 2; */ + public static final int FOLK_VALUE = 2; + /** ROCK = 3; */ + public static final int ROCK_VALUE = 3; + + public final int 
getNumber() { + if (this == UNRECOGNIZED) { + throw new IllegalArgumentException("Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @Deprecated + public static Genre valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Genre forNumber(int value) { + switch (value) { + case 0: + return POP; + case 1: + return JAZZ; + case 2: + return FOLK; + case 3: + return ROCK; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Genre findValueByNumber(int number) { + return Genre.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new IllegalStateException("Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return SingerProto.getDescriptor().getEnumTypes().get(0); + } + + private static final Genre[] VALUES = values(); + + public static Genre valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + 
return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Genre(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:examples.spanner.music.Genre) + } + + public interface SingerInfoOrBuilder + extends + // @@protoc_insertion_point(interface_extends:examples.spanner.music.SingerInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * optional int64 singer_id = 1; + * + * @return Whether the singerId field is set. + */ + boolean hasSingerId(); + /** + * optional int64 singer_id = 1; + * + * @return The singerId. + */ + long getSingerId(); + + /** + * optional string birth_date = 2; + * + * @return Whether the birthDate field is set. + */ + boolean hasBirthDate(); + /** + * optional string birth_date = 2; + * + * @return The birthDate. + */ + String getBirthDate(); + /** + * optional string birth_date = 2; + * + * @return The bytes for birthDate. + */ + com.google.protobuf.ByteString getBirthDateBytes(); + + /** + * optional string nationality = 3; + * + * @return Whether the nationality field is set. + */ + boolean hasNationality(); + /** + * optional string nationality = 3; + * + * @return The nationality. + */ + String getNationality(); + /** + * optional string nationality = 3; + * + * @return The bytes for nationality. + */ + com.google.protobuf.ByteString getNationalityBytes(); + + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return Whether the genre field is set. + */ + boolean hasGenre(); + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The enum numeric value on the wire for genre. + */ + int getGenreValue(); + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The genre. 
+ */ + Genre getGenre(); + } + /** Protobuf type {@code examples.spanner.music.SingerInfo} */ + public static final class SingerInfo extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:examples.spanner.music.SingerInfo) + SingerInfoOrBuilder { + private static final long serialVersionUID = 0L; + // Use SingerInfo.newBuilder() to construct. + private SingerInfo(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SingerInfo() { + birthDate_ = ""; + nationality_ = ""; + genre_ = 0; + } + + @Override + @SuppressWarnings({"unused"}) + protected Object newInstance(UnusedPrivateParameter unused) { + return new SingerInfo(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return SingerProto.internal_static_examples_spanner_music_SingerInfo_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return SingerProto.internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized(SingerInfo.class, Builder.class); + } + + private int bitField0_; + public static final int SINGER_ID_FIELD_NUMBER = 1; + private long singerId_ = 0L; + /** + * optional int64 singer_id = 1; + * + * @return Whether the singerId field is set. + */ + @Override + public boolean hasSingerId() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * optional int64 singer_id = 1; + * + * @return The singerId. + */ + @Override + public long getSingerId() { + return singerId_; + } + + public static final int BIRTH_DATE_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile Object birthDate_ = ""; + /** + * optional string birth_date = 2; + * + * @return Whether the birthDate field is set. + */ + @Override + public boolean hasBirthDate() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * optional string birth_date = 2; + * + * @return The birthDate. 
+ */ + @Override + public String getBirthDate() { + Object ref = birthDate_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + birthDate_ = s; + return s; + } + } + /** + * optional string birth_date = 2; + * + * @return The bytes for birthDate. + */ + @Override + public com.google.protobuf.ByteString getBirthDateBytes() { + Object ref = birthDate_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + birthDate_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int NATIONALITY_FIELD_NUMBER = 3; + + @SuppressWarnings("serial") + private volatile Object nationality_ = ""; + /** + * optional string nationality = 3; + * + * @return Whether the nationality field is set. + */ + @Override + public boolean hasNationality() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * optional string nationality = 3; + * + * @return The nationality. + */ + @Override + public String getNationality() { + Object ref = nationality_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + nationality_ = s; + return s; + } + } + /** + * optional string nationality = 3; + * + * @return The bytes for nationality. 
+ */ + @Override + public com.google.protobuf.ByteString getNationalityBytes() { + Object ref = nationality_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + nationality_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int GENRE_FIELD_NUMBER = 4; + private int genre_ = 0; + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return Whether the genre field is set. + */ + @Override + public boolean hasGenre() { + return ((bitField0_ & 0x00000008) != 0); + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The enum numeric value on the wire for genre. + */ + @Override + public int getGenreValue() { + return genre_; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The genre. + */ + @Override + public Genre getGenre() { + Genre result = Genre.forNumber(genre_); + return result == null ? 
Genre.UNRECOGNIZED : result; + } + + private byte memoizedIsInitialized = -1; + + @Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(1, singerId_); + } + if (((bitField0_ & 0x00000002) != 0)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, birthDate_); + } + if (((bitField0_ & 0x00000004) != 0)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, nationality_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeEnum(4, genre_); + } + getUnknownFields().writeTo(output); + } + + @Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, singerId_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, birthDate_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, nationality_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(4, genre_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @Override + public boolean equals(final Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof SingerInfo)) { + return super.equals(obj); + } + SingerInfo other = (SingerInfo) obj; + + if (hasSingerId() != other.hasSingerId()) return false; + if (hasSingerId()) { + if (getSingerId() != other.getSingerId()) return false; + } + if (hasBirthDate() != other.hasBirthDate()) return false; 
+ if (hasBirthDate()) { + if (!getBirthDate().equals(other.getBirthDate())) return false; + } + if (hasNationality() != other.hasNationality()) return false; + if (hasNationality()) { + if (!getNationality().equals(other.getNationality())) return false; + } + if (hasGenre() != other.hasGenre()) return false; + if (hasGenre()) { + if (genre_ != other.genre_) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasSingerId()) { + hash = (37 * hash) + SINGER_ID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getSingerId()); + } + if (hasBirthDate()) { + hash = (37 * hash) + BIRTH_DATE_FIELD_NUMBER; + hash = (53 * hash) + getBirthDate().hashCode(); + } + if (hasNationality()) { + hash = (37 * hash) + NATIONALITY_FIELD_NUMBER; + hash = (53 * hash) + getNationality().hashCode(); + } + if (hasGenre()) { + hash = (37 * hash) + GENRE_FIELD_NUMBER; + hash = (53 * hash) + genre_; + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static SingerInfo parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static SingerInfo parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static SingerInfo parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static SingerInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static SingerInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static SingerInfo parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static SingerInfo parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static SingerInfo parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static SingerInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static SingerInfo parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static SingerInfo parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static SingerInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @Override + public Builder newBuilderForType() { + return 
newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(SingerInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @Override + protected Builder newBuilderForType(BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** Protobuf type {@code examples.spanner.music.SingerInfo} */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:examples.spanner.music.SingerInfo) + SingerInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return SingerProto.internal_static_examples_spanner_music_SingerInfo_descriptor; + } + + @Override + protected FieldAccessorTable internalGetFieldAccessorTable() { + return SingerProto.internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized(SingerInfo.class, Builder.class); + } + + // Construct using com.example.spanner.SingerProto.SingerInfo.newBuilder() + private Builder() {} + + private Builder(BuilderParent parent) { + super(parent); + } + + @Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + singerId_ = 0L; + birthDate_ = ""; + nationality_ = ""; + genre_ = 0; + return this; + } + + @Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return SingerProto.internal_static_examples_spanner_music_SingerInfo_descriptor; + } + + @Override + public SingerInfo getDefaultInstanceForType() { + return SingerInfo.getDefaultInstance(); + } + + @Override + public SingerInfo build() { + SingerInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + 
} + + @Override + public SingerInfo buildPartial() { + SingerInfo result = new SingerInfo(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(SingerInfo result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.singerId_ = singerId_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.birthDate_ = birthDate_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.nationality_ = nationality_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.genre_ = genre_; + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + @Override + public Builder clone() { + return super.clone(); + } + + @Override + public Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.setField(field, value); + } + + @Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { + return super.setRepeatedField(field, index, value); + } + + @Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { + return super.addRepeatedField(field, value); + } + + @Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof SingerInfo) { + return mergeFrom((SingerInfo) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(SingerInfo other) { + if (other == SingerInfo.getDefaultInstance()) return this; + if 
(other.hasSingerId()) { + setSingerId(other.getSingerId()); + } + if (other.hasBirthDate()) { + birthDate_ = other.birthDate_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasNationality()) { + nationality_ = other.nationality_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasGenre()) { + setGenre(other.getGenre()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @Override + public final boolean isInitialized() { + return true; + } + + @Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + singerId_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: + { + birthDate_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: + { + nationality_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: + { + genre_ = input.readEnum(); + bitField0_ |= 0x00000008; + break; + } // case 32 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private long singerId_; + /** + * optional int64 singer_id = 1; + * + * @return Whether the singerId field is set. + */ + @Override + public boolean hasSingerId() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * optional int64 singer_id = 1; + * + * @return The singerId. 
+ */ + @Override + public long getSingerId() { + return singerId_; + } + /** + * optional int64 singer_id = 1; + * + * @param value The singerId to set. + * @return This builder for chaining. + */ + public Builder setSingerId(long value) { + + singerId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * optional int64 singer_id = 1; + * + * @return This builder for chaining. + */ + public Builder clearSingerId() { + bitField0_ = (bitField0_ & ~0x00000001); + singerId_ = 0L; + onChanged(); + return this; + } + + private Object birthDate_ = ""; + /** + * optional string birth_date = 2; + * + * @return Whether the birthDate field is set. + */ + public boolean hasBirthDate() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * optional string birth_date = 2; + * + * @return The birthDate. + */ + public String getBirthDate() { + Object ref = birthDate_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + birthDate_ = s; + return s; + } else { + return (String) ref; + } + } + /** + * optional string birth_date = 2; + * + * @return The bytes for birthDate. + */ + public com.google.protobuf.ByteString getBirthDateBytes() { + Object ref = birthDate_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + birthDate_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string birth_date = 2; + * + * @param value The birthDate to set. + * @return This builder for chaining. + */ + public Builder setBirthDate(String value) { + if (value == null) { + throw new NullPointerException(); + } + birthDate_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * optional string birth_date = 2; + * + * @return This builder for chaining. 
+ */ + public Builder clearBirthDate() { + birthDate_ = getDefaultInstance().getBirthDate(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * optional string birth_date = 2; + * + * @param value The bytes for birthDate to set. + * @return This builder for chaining. + */ + public Builder setBirthDateBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + birthDate_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private Object nationality_ = ""; + /** + * optional string nationality = 3; + * + * @return Whether the nationality field is set. + */ + public boolean hasNationality() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * optional string nationality = 3; + * + * @return The nationality. + */ + public String getNationality() { + Object ref = nationality_; + if (!(ref instanceof String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + nationality_ = s; + return s; + } else { + return (String) ref; + } + } + /** + * optional string nationality = 3; + * + * @return The bytes for nationality. + */ + public com.google.protobuf.ByteString getNationalityBytes() { + Object ref = nationality_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + nationality_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string nationality = 3; + * + * @param value The nationality to set. + * @return This builder for chaining. + */ + public Builder setNationality(String value) { + if (value == null) { + throw new NullPointerException(); + } + nationality_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * optional string nationality = 3; + * + * @return This builder for chaining. 
+ */ + public Builder clearNationality() { + nationality_ = getDefaultInstance().getNationality(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * optional string nationality = 3; + * + * @param value The bytes for nationality to set. + * @return This builder for chaining. + */ + public Builder setNationalityBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + nationality_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private int genre_ = 0; + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return Whether the genre field is set. + */ + @Override + public boolean hasGenre() { + return ((bitField0_ & 0x00000008) != 0); + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The enum numeric value on the wire for genre. + */ + @Override + public int getGenreValue() { + return genre_; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @param value The enum numeric value on the wire for genre to set. + * @return This builder for chaining. + */ + public Builder setGenreValue(int value) { + genre_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return The genre. + */ + @Override + public Genre getGenre() { + Genre result = Genre.forNumber(genre_); + return result == null ? Genre.UNRECOGNIZED : result; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @param value The genre to set. + * @return This builder for chaining. + */ + public Builder setGenre(Genre value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + genre_ = value.getNumber(); + onChanged(); + return this; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * + * @return This builder for chaining. 
+ */ + public Builder clearGenre() { + bitField0_ = (bitField0_ & ~0x00000008); + genre_ = 0; + onChanged(); + return this; + } + + @Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:examples.spanner.music.SingerInfo) + } + + // @@protoc_insertion_point(class_scope:examples.spanner.music.SingerInfo) + private static final SingerInfo DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new SingerInfo(); + } + + public static SingerInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @Override + public SingerInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @Override + public SingerInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private 
static final com.google.protobuf.Descriptors.Descriptor + internal_static_examples_spanner_music_SingerInfo_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + String[] descriptorData = { + "\n\014singer.proto\022\026examples.spanner.music\"\301" + + "\001\n\nSingerInfo\022\026\n\tsinger_id\030\001 \001(\003H\000\210\001\001\022\027\n" + + "\nbirth_date\030\002 \001(\tH\001\210\001\001\022\030\n\013nationality\030\003 " + + "\001(\tH\002\210\001\001\0221\n\005genre\030\004 \001(\0162\035.examples.spann" + + "er.music.GenreH\003\210\001\001B\014\n\n_singer_idB\r\n\013_bi" + + "rth_dateB\016\n\014_nationalityB\010\n\006_genre*.\n\005Ge" + + "nre\022\007\n\003POP\020\000\022\010\n\004JAZZ\020\001\022\010\n\004FOLK\020\002\022\010\n\004ROCK" + + "\020\003B$\n\023com.example.spannerB\013SingerProtoP\000" + + "b\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); + internal_static_examples_spanner_music_SingerInfo_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_examples_spanner_music_SingerInfo_descriptor, + new String[] { + "SingerId", "BirthDate", "Nationality", "Genre", + }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/samples/snippets/src/main/java/com/example/spanner/SpannerGraphSample.java b/samples/snippets/src/main/java/com/example/spanner/SpannerGraphSample.java new file mode 100644 index 00000000000..ccb053133e5 --- /dev/null +++ 
b/samples/snippets/src/main/java/com/example/spanner/SpannerGraphSample.java @@ -0,0 +1,587 @@ +/* + * Copyright 2024 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeyRange; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.InstanceName; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ExecutionException; + +/** + * Example code for using the Cloud Spanner API. This example demonstrates all the common property + * graph operations that can be done on Cloud Spanner. These are: + * + *

    + * + *

      + *
 *   <li>Creating a Cloud Spanner database with a property graph. + 
 *   <li>Inserting data, updating and deleting data. + 
 *   <li>Executing graph queries. + 
    + */ +public class SpannerGraphSample { + + // [START spanner_insert_graph_data] + /** Class to contain sample Person data. */ + static class Person { + + final long id; + final String name; + final Timestamp birthday; + final String country; + final String city; + + Person(long id, String name, Timestamp birthday, String country, String city) { + this.id = id; + this.name = name; + this.birthday = birthday; + this.country = country; + this.city = city; + } + } + + /** Class to contain sample Account data. */ + static class Account { + + final long id; + final Timestamp createTime; + final boolean isBlocked; + final String nickName; + + Account(long id, Timestamp createTime, boolean isBlocked, String nickName) { + this.id = id; + this.createTime = createTime; + this.isBlocked = isBlocked; + this.nickName = nickName; + } + } + + /** Class to contain sample Transfer data. */ + static class Transfer { + + final long id; + final long toId; + final double amount; + final Timestamp createTime; + final String orderNumber; + + Transfer(long id, long toId, double amount, Timestamp createTime, String orderNumber) { + this.id = id; + this.toId = toId; + this.amount = amount; + this.createTime = createTime; + this.orderNumber = orderNumber; + } + } + + /** Class to contain sample Ownership data. 
*/ + static class Own { + + final long id; + final long accountId; + final Timestamp createTime; + + Own(long id, long accountId, Timestamp createTime) { + this.id = id; + this.accountId = accountId; + this.createTime = createTime; + } + } + + // [END spanner_insert_graph_data] + + // [START spanner_create_database_with_property_graph] + static void createDatabaseWithPropertyGraph( + DatabaseAdminClient dbAdminClient, InstanceName instanceName, String databaseId) { + CreateDatabaseRequest createDatabaseRequest = + CreateDatabaseRequest.newBuilder() + .setCreateStatement("CREATE DATABASE `" + databaseId + "`") + .setParent(instanceName.toString()) + .addAllExtraStatements( + Arrays.asList( + "CREATE TABLE Person (" + + " id INT64 NOT NULL," + + " name STRING(MAX)," + + " birthday TIMESTAMP," + + " country STRING(MAX)," + + " city STRING(MAX)," + + ") PRIMARY KEY (id)", + "CREATE TABLE Account (" + + " id INT64 NOT NULL," + + " create_time TIMESTAMP," + + " is_blocked BOOL," + + " nick_name STRING(MAX)," + + ") PRIMARY KEY (id)", + "CREATE TABLE PersonOwnAccount (" + + " id INT64 NOT NULL," + + " account_id INT64 NOT NULL," + + " create_time TIMESTAMP," + + " FOREIGN KEY (account_id)" + + " REFERENCES Account (id)" + + ") PRIMARY KEY (id, account_id)," + + "INTERLEAVE IN PARENT Person ON DELETE CASCADE", + "CREATE TABLE AccountTransferAccount (" + + " id INT64 NOT NULL," + + " to_id INT64 NOT NULL," + + " amount FLOAT64," + + " create_time TIMESTAMP NOT NULL," + + " order_number STRING(MAX)," + + " FOREIGN KEY (to_id) REFERENCES Account (id)" + + ") PRIMARY KEY (id, to_id, create_time)," + + "INTERLEAVE IN PARENT Account ON DELETE CASCADE", + "CREATE OR REPLACE PROPERTY GRAPH FinGraph " + + "NODE TABLES (Account, Person)" + + "EDGE TABLES (" + + " PersonOwnAccount" + + " SOURCE KEY(id) REFERENCES Person(id)" + + " DESTINATION KEY(account_id) REFERENCES Account(id)" + + " LABEL Owns," + + " AccountTransferAccount" + + " SOURCE KEY(id) REFERENCES Account(id)" + + " 
DESTINATION KEY(to_id) REFERENCES Account(id)" + + " LABEL Transfers)")) + .build(); + try { + // Initiate the request which returns an OperationFuture. + com.google.spanner.admin.database.v1.Database db = + dbAdminClient.createDatabaseAsync(createDatabaseRequest).get(); + System.out.println("Created database [" + db.getName() + "]"); + } catch (ExecutionException e) { + // If the operation failed during execution, expose the cause. + System.out.println("Encountered exception" + e.getCause()); + throw (SpannerException) e.getCause(); + } catch (InterruptedException e) { + // Throw when a thread is waiting, sleeping, or otherwise occupied, + // and the thread is interrupted, either before or during the activity. + throw SpannerExceptionFactory.propagateInterrupt(e); + } + } + + // [END spanner_create_database_with_property_graph] + + // [START spanner_insert_graph_data] + static final List ACCOUNTS = + Arrays.asList( + new Account( + 7, Timestamp.parseTimestamp("2020-01-10T06:22:20.12Z"), false, "Vacation Fund"), + new Account( + 16, Timestamp.parseTimestamp("2020-01-27T17:55:09.12Z"), true, "Vacation Fund"), + new Account( + 20, Timestamp.parseTimestamp("2020-02-18T05:44:20.12Z"), false, "Rainy Day Fund")); + + static final List PERSONS = + Arrays.asList( + new Person( + 1, + "Alex", + Timestamp.parseTimestamp("1991-12-21T00:00:00.12Z"), + "Australia", + " Adelaide"), + new Person( + 2, + "Dana", + Timestamp.parseTimestamp("1980-10-31T00:00:00.12Z"), + "Czech_Republic", + "Moravia"), + new Person( + 3, "Lee", Timestamp.parseTimestamp("1986-12-07T00:00:00.12Z"), "India", "Kollam")); + + static final List TRANSFERS = + Arrays.asList( + new Transfer( + 7, 16, 300.0, Timestamp.parseTimestamp("2020-08-29T15:28:58.12Z"), "304330008004315"), + new Transfer( + 7, 16, 100.0, Timestamp.parseTimestamp("2020-10-04T16:55:05.12Z"), "304120005529714"), + new Transfer( + 16, + 20, + 300.0, + Timestamp.parseTimestamp("2020-09-25T02:36:14.12Z"), + "103650009791820"), + new Transfer( 
+ 20, 7, 500.0, Timestamp.parseTimestamp("2020-10-04T16:55:05.12Z"), "304120005529714"), + new Transfer( + 20, + 16, + 200.0, + Timestamp.parseTimestamp("2020-10-17T03:59:40.12Z"), + "302290001255747")); + + static final List OWNERSHIPS = + Arrays.asList( + new Own(1, 7, Timestamp.parseTimestamp("2020-01-10T06:22:20.12Z")), + new Own(2, 20, Timestamp.parseTimestamp("2020-01-27T17:55:09.12Z")), + new Own(3, 16, Timestamp.parseTimestamp("2020-02-18T05:44:20.12Z"))); + + static void insertData(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + for (Account account : ACCOUNTS) { + mutations.add( + Mutation.newInsertBuilder("Account") + .set("id") + .to(account.id) + .set("create_time") + .to(account.createTime) + .set("is_blocked") + .to(account.isBlocked) + .set("nick_name") + .to(account.nickName) + .build()); + } + for (Person person : PERSONS) { + mutations.add( + Mutation.newInsertBuilder("Person") + .set("id") + .to(person.id) + .set("name") + .to(person.name) + .set("birthday") + .to(person.birthday) + .set("country") + .to(person.country) + .set("city") + .to(person.city) + .build()); + } + for (Transfer transfer : TRANSFERS) { + mutations.add( + Mutation.newInsertBuilder("AccountTransferAccount") + .set("id") + .to(transfer.id) + .set("to_id") + .to(transfer.toId) + .set("amount") + .to(transfer.amount) + .set("create_time") + .to(transfer.createTime) + .set("order_number") + .to(transfer.orderNumber) + .build()); + } + for (Own own : OWNERSHIPS) { + mutations.add( + Mutation.newInsertBuilder("PersonOwnAccount") + .set("id") + .to(own.id) + .set("account_id") + .to(own.accountId) + .set("create_time") + .to(own.createTime) + .build()); + } + + dbClient.write(mutations); + } + + // [END spanner_insert_graph_data] + + // [START spanner_insert_graph_data_with_dml] + static void insertUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = + "INSERT INTO Account (id, create_time, 
is_blocked) " + + " VALUES" + + " (1, CAST('2000-08-10 08:18:48.463959-07:52' AS TIMESTAMP), false)," + + " (2, CAST('2000-08-12 07:13:16.463959-03:41' AS TIMESTAMP), true)"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record(s) inserted into Account.\n", rowCount); + return null; + }); + + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = + "INSERT INTO AccountTransferAccount (id, to_id, create_time, amount) " + + " VALUES" + + " (1, 2, CAST('2000-09-11 03:11:18.463959-06:36' AS TIMESTAMP), 100)," + + " (1, 1, CAST('2000-09-12 04:09:34.463959-05:12' AS TIMESTAMP), 200) "; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d record(s) inserted into AccountTransferAccount.\n", rowCount); + return null; + }); + } + + // [END spanner_insert_graph_data_with_dml] + + // [START spanner_update_graph_data_with_dml] + static void updateUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = "UPDATE Account SET is_blocked = false WHERE id = 2"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d Account record(s) updated.\n", rowCount); + return null; + }); + + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = + "UPDATE AccountTransferAccount SET amount = 300 WHERE id = 1 AND to_id = 2"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d AccountTransferAccount record(s) updated.\n", rowCount); + return null; + }); + } + + // [END spanner_update_graph_data_with_dml] + + // [START spanner_update_graph_data_with_graph_query_in_dml] + static void updateUsingGraphQueryInDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = + "UPDATE Account SET is_blocked = true " + + "WHERE id IN {" + + " GRAPH FinGraph" + + " MATCH (a:Account WHERE a.id = 
1)-[:TRANSFERS]->{1,2}(b:Account)" + + " RETURN b.id}"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d Account record(s) updated.\n", rowCount); + return null; + }); + } + + // [END spanner_update_graph_data_with_graph_query_in_dml] + + // [START spanner_query_graph_data] + static void query(DatabaseClient dbClient) { + try (ResultSet resultSet = + dbClient + .singleUse() // Execute a single query against Cloud Spanner. + .executeQuery( + Statement.of( + "Graph FinGraph MATCH" + + " (a:Person)-[o:Owns]->()-[t:Transfers]->()<-[p:Owns]-(b:Person)RETURN" + + " a.name AS sender, b.name AS receiver, t.amount, t.create_time AS" + + " transfer_at"))) { + while (resultSet.next()) { + System.out.printf( + "%s %s %f %s\n", + resultSet.getString(0), + resultSet.getString(1), + resultSet.getDouble(2), + resultSet.getTimestamp(3)); + } + } + } + + // [END spanner_query_graph_data] + + // [START spanner_query_graph_data_with_parameter] + static void queryWithParameter(DatabaseClient dbClient) { + Statement statement = + Statement.newBuilder( + "Graph FinGraph MATCH" + + " (a:Person)-[o:Owns]->()-[t:Transfers]->()<-[p:Owns]-(b:Person) WHERE" + + " t.amount >= @min RETURN a.name AS sender, b.name AS receiver, t.amount," + + " t.create_time AS transfer_at") + .bind("min") + .to(500) + .build(); + try (ResultSet resultSet = dbClient.singleUse().executeQuery(statement)) { + while (resultSet.next()) { + System.out.printf( + "%s %s %f %s\n", + resultSet.getString("sender"), + resultSet.getString("receiver"), + resultSet.getDouble("amount"), + resultSet.getTimestamp("transfer_at")); + } + } + } + + // [END spanner_query_graph_data_with_parameter] + + // [START spanner_delete_graph_data_with_dml] + static void deleteUsingDml(DatabaseClient dbClient) { + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = "DELETE FROM AccountTransferAccount WHERE id = 1 AND to_id = 2"; + long rowCount = 
transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d AccountTransferAccount record(s) deleted.\n", rowCount); + return null; + }); + + dbClient + .readWriteTransaction() + .run( + transaction -> { + String sql = "DELETE FROM Account WHERE id = 2"; + long rowCount = transaction.executeUpdate(Statement.of(sql)); + System.out.printf("%d Account record(s) deleted.\n", rowCount); + return null; + }); + } + + // [END spanner_delete_graph_data_with_dml] + + // [START spanner_delete_graph_data] + static void deleteData(DatabaseClient dbClient) { + List mutations = new ArrayList<>(); + + // KeySet.Builder can be used to delete a specific set of rows. + // Delete the PersonOwnAccount rows with the key values (1,7) and (2,20). + mutations.add( + Mutation.delete( + "PersonOwnAccount", + KeySet.newBuilder().addKey(Key.of(1, 7)).addKey(Key.of(2, 20)).build())); + + // KeyRange can be used to delete rows with a key in a specific range. + // Delete a range of rows where the key prefix is >=1 and <8 + mutations.add( + Mutation.delete( + "AccountTransferAccount", KeySet.range(KeyRange.closedOpen(Key.of(1), Key.of(8))))); + + // KeySet.all() can be used to delete all the rows in a table. + // Delete all Account rows, which will also delete the remaining + // AccountTransferAccount rows since it was defined with ON DELETE CASCADE. + mutations.add(Mutation.delete("Account", KeySet.all())); + + // KeySet.all() can be used to delete all the rows in a table. + // Delete all Person rows, which will also delete the remaining + // PersonOwnAccount rows since it was defined with ON DELETE CASCADE. 
+ mutations.add(Mutation.delete("Person", KeySet.all())); + + dbClient.write(mutations); + System.out.printf("Records deleted.\n"); + } + + // [END spanner_delete_graph_data] + + static void run( + DatabaseClient dbClient, + DatabaseAdminClient dbAdminClient, + String command, + DatabaseId database) { + switch (command) { + case "createdatabase": + createDatabaseWithPropertyGraph( + dbAdminClient, + InstanceName.of( + database.getInstanceId().getProject(), database.getInstanceId().getInstance()), + database.getDatabase()); + break; + case "insert": + insertData(dbClient); + break; + case "insertusingdml": + insertUsingDml(dbClient); + break; + case "updateusingdml": + updateUsingDml(dbClient); + break; + case "updateusinggraphqueryindml": + updateUsingGraphQueryInDml(dbClient); + break; + case "query": + query(dbClient); + break; + case "querywithparameter": + queryWithParameter(dbClient); + break; + case "deleteusingdml": + deleteUsingDml(dbClient); + break; + case "delete": + deleteData(dbClient); + break; + default: + printUsageAndExit(); + } + } + + static void printUsageAndExit() { + System.err.println("Usage:"); + System.err.println(" SpannerGraphExample "); + System.err.println(""); + System.err.println("Examples:"); + System.err.println(" SpannerGraphExample createdatabase my-instance example-db"); + System.err.println(" SpannerGraphExample insert my-instance example-db"); + System.err.println(" SpannerGraphExample insertusingdml my-instance example-db"); + System.err.println(" SpannerGraphExample updateusingdml my-instance example-db"); + System.err.println(" SpannerGraphExample updateusinggraphqueryindml my-instance example-db"); + System.err.println(" SpannerGraphExample query my-instance example-db"); + System.err.println(" SpannerGraphExample querywithparameter my-instance example-db"); + System.err.println(" SpannerGraphExample deleteusingdml my-instance example-db"); + System.err.println(" SpannerGraphExample delete my-instance example-db"); + 
System.exit(1); + } + + public static void main(String[] args) { + if (args.length != 3 && args.length != 4) { + printUsageAndExit(); + } + SpannerOptions options = SpannerOptions.newBuilder().build(); + Spanner spanner = options.getService(); + DatabaseAdminClient dbAdminClient = null; + try { + final String command = args[0]; + DatabaseId db = DatabaseId.of(options.getProjectId(), args[1], args[2]); + // This will return the default project id based on the environment. + String clientProject = spanner.getOptions().getProjectId(); + if (!db.getInstanceId().getProject().equals(clientProject)) { + System.err.println( + "Invalid project specified. Project in the database id should match the" + + "project name set in the environment variable GOOGLE_CLOUD_PROJECT. Expected: " + + clientProject); + printUsageAndExit(); + } + + DatabaseClient dbClient = spanner.getDatabaseClient(db); + dbAdminClient = spanner.createDatabaseAdminClient(); + + run(dbClient, dbAdminClient, command, db); + } finally { + if (dbAdminClient != null) { + if (!dbAdminClient.isShutdown() || !dbAdminClient.isTerminated()) { + dbAdminClient.close(); + } + } + spanner.close(); + } + System.out.println("Closed client"); + } +} diff --git a/samples/snippets/src/main/java/com/example/spanner/UpdateBackupScheduleSample.java b/samples/snippets/src/main/java/com/example/spanner/UpdateBackupScheduleSample.java new file mode 100644 index 00000000000..b49ec4901f6 --- /dev/null +++ b/samples/snippets/src/main/java/com/example/spanner/UpdateBackupScheduleSample.java @@ -0,0 +1,83 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_update_backup_schedule] + +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.protobuf.Duration; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.database.v1.BackupSchedule; +import com.google.spanner.admin.database.v1.BackupScheduleName; +import com.google.spanner.admin.database.v1.BackupScheduleSpec; +import com.google.spanner.admin.database.v1.CreateBackupEncryptionConfig; +import com.google.spanner.admin.database.v1.CrontabSpec; +import com.google.spanner.admin.database.v1.UpdateBackupScheduleRequest; +import java.io.IOException; + +class UpdateBackupScheduleSample { + + static void updateBackupSchedule() throws IOException { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + String backupScheduleId = "my-backup-schedule"; + updateBackupSchedule(projectId, instanceId, databaseId, backupScheduleId); + } + + static void updateBackupSchedule( + String projectId, String instanceId, String databaseId, String backupScheduleId) + throws IOException { + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId); + final CreateBackupEncryptionConfig encryptionConfig = + CreateBackupEncryptionConfig.newBuilder() + .setEncryptionType(CreateBackupEncryptionConfig.EncryptionType.USE_DATABASE_ENCRYPTION) + .build(); + final BackupSchedule backupSchedule = + BackupSchedule.newBuilder() + .setName(backupScheduleName.toString()) + .setRetentionDuration(Duration.newBuilder().setSeconds(3600 * 48)) + .setSpec( + BackupScheduleSpec.newBuilder() + .setCronSpec(CrontabSpec.newBuilder().setText("45 15 * * *").build()) + .build()) + .setEncryptionConfig(encryptionConfig) + .build(); + + try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) { + final FieldMask fieldMask = + FieldMask.newBuilder() + .addPaths("retention_duration") + .addPaths("spec.cron_spec.text") + .addPaths("encryption_config") + .build(); + final BackupSchedule updatedBackupSchedule = + databaseAdminClient.updateBackupSchedule( + UpdateBackupScheduleRequest.newBuilder() + .setBackupSchedule(backupSchedule) + .setUpdateMask(fieldMask) + .build()); + System.out.println( + String.format( + "Updated backup schedule: %s\n%s", + updatedBackupSchedule.getName(), updatedBackupSchedule.toString())); + } + } +} +// [END spanner_update_backup_schedule] diff --git a/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceExample.java b/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceExample.java new file mode 100644 index 00000000000..cb4eadd097a --- /dev/null +++ 
b/samples/snippets/src/main/java/com/example/spanner/UpdateInstanceExample.java @@ -0,0 +1,81 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_update_instance] + +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.common.collect.Lists; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfigName; +import com.google.spanner.admin.instance.v1.InstanceName; +import com.google.spanner.admin.instance.v1.UpdateInstanceRequest; +import java.util.concurrent.ExecutionException; + +public class UpdateInstanceExample { + + static void updateInstance() { + // TODO(developer): Replace these variables before running the sample. + String projectId = "my-project"; + String instanceId = "my-instance"; + updateInstance(projectId, instanceId); + } + + static void updateInstance(String projectId, String instanceId) { + // Set Instance configuration. + int nodeCount = 2; + String displayName = "Updated name"; + + // Update an Instance object that will be used to update the instance. 
+ Instance instance = + Instance.newBuilder() + .setName(InstanceName.of(projectId, instanceId).toString()) + .setDisplayName(displayName) + .setNodeCount(nodeCount) + .setEdition(Instance.Edition.ENTERPRISE) + .setConfig(InstanceConfigName.of(projectId, "regional-us-east4").toString()) + .build(); + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + InstanceAdminClient instanceAdminClient = spanner.createInstanceAdminClient()) { + + // Wait for the updatedInstance operation to finish. + Instance updatedInstance = + instanceAdminClient + .updateInstanceAsync( + UpdateInstanceRequest.newBuilder() + .setFieldMask( + FieldMask.newBuilder().addAllPaths(Lists.newArrayList("edition"))) + .setInstance(instance) + .build()) + .get(); + System.out.printf("Instance %s was successfully updated%n", updatedInstance.getName()); + } catch (ExecutionException e) { + System.out.printf( + "Error: Updating instance %s failed with error message %s%n", + instance.getName(), e.getMessage()); + } catch (InterruptedException e) { + System.out.println("Error: Waiting for updateInstance operation to finish was interrupted"); + } + } +} + +// [END spanner_update_instance] diff --git a/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSample.java b/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSample.java new file mode 100644 index 00000000000..37712a27dc3 --- /dev/null +++ b/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSample.java @@ -0,0 +1,91 @@ +/* + * Copyright 2024 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_update_data_with_proto_types] + +import com.example.spanner.SingerProto.Genre; +import com.example.spanner.SingerProto.SingerInfo; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ProtocolMessageEnum; +import java.util.Collections; +import java.util.List; + +class UpdateProtoDataSample { + + static void updateProtoData() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + updateProtoData(client); + } + } + + static void updateProtoData(DatabaseClient client) { + SingerInfo singerInfo = + SingerInfo.newBuilder() + .setSingerId(2) + .setBirthDate("February") + .setNationality("Country2") + .setGenre(Genre.FOLK) + .build(); + Genre singerGenre = Genre.FOLK; + List singerInfoList = Collections.singletonList(singerInfo); + List singerGenreList = Collections.singletonList(singerGenre); + + client.write( + ImmutableList.of( + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(2L) + .set("SingerInfo") + .to(singerInfo) + .set("SingerInfoArray") + .toProtoMessageArray(singerInfoList, SingerInfo.getDescriptor()) + .set("SingerGenre") + .to(singerGenre) + .set("SingerGenreArray") + .toProtoEnumArray(singerGenreList, Genre.getDescriptor()) + .build(), + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(3L) + .set("SingerInfo") + .to(null, SingerInfo.getDescriptor()) + .set("SingerInfoArray") + .toProtoMessageArray(null, SingerInfo.getDescriptor()) + .set("SingerGenre") + .to(null, Genre.getDescriptor()) + .set("SingerGenreArray") + .toProtoEnumArray(null, Genre.getDescriptor()) + .build())); + System.out.println("Data updated"); + } +} +// [END spanner_update_data_with_proto_types] diff --git a/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSampleUsingDml.java b/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSampleUsingDml.java new file mode 100644 index 00000000000..9b85f774eb7 --- /dev/null +++ b/samples/snippets/src/main/java/com/example/spanner/UpdateProtoDataSampleUsingDml.java @@ -0,0 +1,97 @@ +/* + * Copyright 2024 Google Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +// [START spanner_update_data_with_proto_types_with_dml] + +import com.example.spanner.SingerProto.Genre; +import com.example.spanner.SingerProto.SingerInfo; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.ProtocolMessageEnum; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +class UpdateProtoDataSampleUsingDml { + + static void updateProtoDataUsingDml() { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "my-project"; + String instanceId = "my-instance"; + String databaseId = "my-database"; + + try (Spanner spanner = + SpannerOptions.newBuilder().setProjectId(projectId).build().getService()) { + DatabaseClient client = + spanner.getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); + updateProtoDataUsingDml(client); + } + } + + static void updateProtoDataUsingDml(DatabaseClient client) { + SingerInfo singerInfo = + SingerInfo.newBuilder() + .setSingerId(1) + .setBirthDate("January") + .setNationality("Country1") + .setGenre(Genre.ROCK) + .build(); + Genre singerGenre = Genre.ROCK; + List singerInfoList = Collections.singletonList(singerInfo); + List singerGenreList = Collections.singletonList(singerGenre); + + client + .readWriteTransaction() + .run( + transaction -> { + Statement statement1 = + Statement.newBuilder( + "UPDATE Singers SET SingerInfo = @singerInfo, " + + "SingerInfoArray=@singerInfoArray, " + + "SingerGenre=@singerGenre, SingerGenreArray=@singerGenreArray " + + "WHERE SingerId = 1") + .bind("singerInfo") + .to(singerInfo) + .bind("singerInfoArray") + .toProtoMessageArray(singerInfoList, SingerInfo.getDescriptor()) + .bind("singerGenre") + .to(singerGenre) + .bind("singerGenreArray") + .toProtoEnumArray(singerGenreList, Genre.getDescriptor()) + .build(); + + Statement statement2 = + Statement.newBuilder( + "UPDATE Singers SET SingerInfo.nationality = @singerNationality " + + "WHERE SingerId = 1") + .bind("singerNationality") + .to("Country2") + .build(); + + transaction.batchUpdate(Arrays.asList(statement1, statement2)); + return null; + }); + + System.out.println("record(s) updated"); + } +} +// [END spanner_update_data_with_proto_types_with_dml] diff --git a/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceExample.java b/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceExample.java index 15b33ae8927..a17784d874b 100644 --- 
a/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceExample.java +++ b/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceExample.java @@ -42,7 +42,7 @@ static void createInstance(String projectId, String instanceId) { InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); // Set Instance configuration. - String configId = "regional-us-central1"; + String configId = "regional-us-east4"; int nodeCount = 2; String displayName = "Descriptive name"; diff --git a/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigExample.java b/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigExample.java index 3fe60c554bb..f8a683865ac 100644 --- a/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigExample.java +++ b/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithAutoscalingConfigExample.java @@ -44,7 +44,7 @@ static void createInstance(String projectId, String instanceId) { InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); // Set Instance configuration. - String configId = "regional-us-central1"; + String configId = "regional-us-east4"; // Create an autoscaling config. 
AutoscalingConfig autoscalingConfig = AutoscalingConfig.newBuilder() diff --git a/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithProcessingUnitsExample.java b/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithProcessingUnitsExample.java index f688b4cdbf9..95d4f1b6737 100644 --- a/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithProcessingUnitsExample.java +++ b/samples/snippets/src/main/java/com/example/spanner/admin/archived/CreateInstanceWithProcessingUnitsExample.java @@ -42,7 +42,7 @@ static void createInstance(String projectId, String instanceId) { InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); // Set Instance configuration. - String configId = "regional-us-central1"; + String configId = "regional-us-east4"; // This will create an instance with the processing power of 0.2 nodes. int processingUnits = 500; String displayName = "Descriptive name"; diff --git a/samples/snippets/src/main/resources/com/example/spanner/README.md b/samples/snippets/src/main/resources/com/example/spanner/README.md new file mode 100644 index 00000000000..6dc4f7aa59f --- /dev/null +++ b/samples/snippets/src/main/resources/com/example/spanner/README.md @@ -0,0 +1,6 @@ +#### To generate SingerProto.java and descriptors.pb file from singer.proto using `protoc` +```shell +cd samples/snippets/src/main/resources/ +protoc --proto_path=com/example/spanner/ --include_imports --descriptor_set_out=com/example/spanner/descriptors.pb + --java_out=. 
com/example/spanner/singer.proto +``` diff --git a/samples/snippets/src/main/resources/com/example/spanner/descriptors.pb b/samples/snippets/src/main/resources/com/example/spanner/descriptors.pb new file mode 100644 index 00000000000..dd9cf8d4344 Binary files /dev/null and b/samples/snippets/src/main/resources/com/example/spanner/descriptors.pb differ diff --git a/samples/snippets/src/main/resources/com/example/spanner/singer.proto b/samples/snippets/src/main/resources/com/example/spanner/singer.proto new file mode 100644 index 00000000000..12b213f3fae --- /dev/null +++ b/samples/snippets/src/main/resources/com/example/spanner/singer.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package examples.spanner.music; + +option java_package = "com.example.spanner"; +option java_outer_classname = "SingerProto"; +option java_multiple_files = false; + +message SingerInfo { + optional int64 singer_id = 1; + optional string birth_date = 2; + optional string nationality = 3; + optional Genre genre = 4; +} + +enum Genre { + POP = 0; + JAZZ = 1; + FOLK = 2; + ROCK = 3; +} diff --git a/samples/snippets/src/test/java/com/example/spanner/CreateFullBackupScheduleSampleIT.java b/samples/snippets/src/test/java/com/example/spanner/CreateFullBackupScheduleSampleIT.java new file mode 100644 index 00000000000..15ec04fe306 --- /dev/null +++ b/samples/snippets/src/test/java/com/example/spanner/CreateFullBackupScheduleSampleIT.java @@ -0,0 +1,50 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.spanner.admin.database.v1.BackupScheduleName; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class CreateFullBackupScheduleSampleIT extends SampleTestBaseV2 { + // Default instance and given db should exist for tests to pass. + private static String databaseId = System.getProperty("spanner.sample.database", "mysample"); + + @Test + public void testCreateFullBackupScheduleSample() throws Exception { + String backupScheduleId = String.format("schedule-%s", UUID.randomUUID()); + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId); + String out = + SampleRunner.runSample( + () -> { + try { + CreateFullBackupScheduleSample.createFullBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } finally { + DeleteBackupScheduleSample.deleteBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } + }); + assertThat(out).contains(String.format("Created backup schedule: %s", backupScheduleName)); + } +} diff --git a/samples/snippets/src/test/java/com/example/spanner/CreateIncrementalBackupScheduleSampleIT.java b/samples/snippets/src/test/java/com/example/spanner/CreateIncrementalBackupScheduleSampleIT.java new file mode 100644 index 00000000000..74136562e10 --- /dev/null +++ b/samples/snippets/src/test/java/com/example/spanner/CreateIncrementalBackupScheduleSampleIT.java @@ -0,0 +1,51 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.spanner.admin.database.v1.BackupScheduleName; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class CreateIncrementalBackupScheduleSampleIT extends SampleTestBaseV2 { + // Default instance and given db should exist for tests to pass. + private static String databaseId = System.getProperty("spanner.sample.database", "mysample"); + + @Test + public void testCreateIncrementalBackupScheduleSample() throws Exception { + String backupScheduleId = String.format("schedule-%s", UUID.randomUUID()); + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId); + String out = + SampleRunner.runSample( + () -> { + try { + CreateIncrementalBackupScheduleSample.createIncrementalBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } finally { + DeleteBackupScheduleSample.deleteBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } + }); + assertThat(out) + .contains(String.format("Created incremental backup schedule: %s", backupScheduleName)); + } +} diff --git a/samples/snippets/src/test/java/com/example/spanner/CreateInstancePartitionSampleIT.java b/samples/snippets/src/test/java/com/example/spanner/CreateInstancePartitionSampleIT.java new file mode 100644 index 00000000000..3038d29750d --- /dev/null +++ 
b/samples/snippets/src/test/java/com/example/spanner/CreateInstancePartitionSampleIT.java @@ -0,0 +1,55 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.spanner.InstanceAdminClient; +import com.google.cloud.spanner.InstanceConfigId; +import com.google.cloud.spanner.InstanceId; +import com.google.cloud.spanner.InstanceInfo; +import com.google.spanner.admin.instance.v1.InstancePartitionName; +import org.junit.Test; + +public class CreateInstancePartitionSampleIT extends SampleTestBaseV2 { + + @Test + public void testCreateInstancePartition() throws Exception { + String instanceId = idGenerator.generateInstanceId(); + InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); + instanceAdminClient + .createInstance( + InstanceInfo.newBuilder(InstanceId.of(projectId, instanceId)) + .setDisplayName("Geo-partitioning test instance") + .setInstanceConfigId(InstanceConfigId.of(projectId, "regional-us-central1")) + .setNodeCount(1) + .build()) + .get(); + + String instancePartitionId = "my-instance-partition"; + String out = + SampleRunner.runSample( + () -> + CreateInstancePartitionSample.createInstancePartition( + projectId, instanceId, instancePartitionId)); + assertThat(out) + .contains( + String.format( + "Instance partition %s", + InstancePartitionName.of(projectId, instanceId, 
instancePartitionId).toString())); + } +} diff --git a/samples/snippets/src/test/java/com/example/spanner/DeleteBackupScheduleSampleIT.java b/samples/snippets/src/test/java/com/example/spanner/DeleteBackupScheduleSampleIT.java new file mode 100644 index 00000000000..3d11bd8dce1 --- /dev/null +++ b/samples/snippets/src/test/java/com/example/spanner/DeleteBackupScheduleSampleIT.java @@ -0,0 +1,50 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.spanner.admin.database.v1.BackupScheduleName; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class DeleteBackupScheduleSampleIT extends SampleTestBaseV2 { + // Default instance and given db should exist for tests to pass. 
+ private static String databaseId = System.getProperty("spanner.sample.database", "mysample"); + + @Test + public void testDeleteBackupScheduleSample() throws Exception { + String backupScheduleId = String.format("schedule-%s", UUID.randomUUID()); + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId); + String out = + SampleRunner.runSample( + () -> { + try { + CreateFullBackupScheduleSample.createFullBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } finally { + DeleteBackupScheduleSample.deleteBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } + }); + assertThat(out).contains(String.format("Deleted backup schedule: %s", backupScheduleName)); + } +} diff --git a/samples/snippets/src/test/java/com/example/spanner/GetBackupScheduleSampleIT.java b/samples/snippets/src/test/java/com/example/spanner/GetBackupScheduleSampleIT.java new file mode 100644 index 00000000000..fa006355a23 --- /dev/null +++ b/samples/snippets/src/test/java/com/example/spanner/GetBackupScheduleSampleIT.java @@ -0,0 +1,53 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.spanner.admin.database.v1.BackupScheduleName; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class GetBackupScheduleSampleIT extends SampleTestBaseV2 { + // Default instance and given db should exist for tests to pass. + private static String databaseId = System.getProperty("spanner.sample.database", "mysample"); + + @Test + public void testGetBackupScheduleSample() throws Exception { + String backupScheduleId = String.format("schedule-%s", UUID.randomUUID()); + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId); + String out = + SampleRunner.runSample( + () -> { + try { + CreateFullBackupScheduleSample.createFullBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + GetBackupScheduleSample.getBackupSchedule( + projectId, instanceId, + databaseId, backupScheduleId); + } finally { + DeleteBackupScheduleSample.deleteBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } + }); + assertThat(out).contains(String.format("Backup schedule: %s", backupScheduleName)); + } +} diff --git a/samples/snippets/src/test/java/com/example/spanner/ListBackupSchedulesSampleIT.java b/samples/snippets/src/test/java/com/example/spanner/ListBackupSchedulesSampleIT.java new file mode 100644 index 00000000000..386b9442c01 --- /dev/null +++ b/samples/snippets/src/test/java/com/example/spanner/ListBackupSchedulesSampleIT.java @@ -0,0 +1,61 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.spanner.admin.database.v1.BackupScheduleName; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ListBackupSchedulesSampleIT extends SampleTestBaseV2 { + // Default instance and given db should exist for tests to pass. + private static String databaseId = System.getProperty("spanner.sample.database", "mysample"); + + @Test + public void testListBackupSchedulesSample() throws Exception { + String backupScheduleId1 = String.format("schedule-%s", UUID.randomUUID()); + BackupScheduleName backupScheduleName1 = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId1); + + String backupScheduleId2 = String.format("schedule-%s", UUID.randomUUID()); + BackupScheduleName backupScheduleName2 = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId2); + + String out = + SampleRunner.runSample( + () -> { + try { + CreateFullBackupScheduleSample.createFullBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId1); + CreateFullBackupScheduleSample.createFullBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId2); + ListBackupSchedulesSample.listBackupSchedules(projectId, instanceId, databaseId); + } finally { + DeleteBackupScheduleSample.deleteBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId1); + DeleteBackupScheduleSample.deleteBackupSchedule( + 
projectId, instanceId, databaseId, backupScheduleId2); + } + }); + assertThat(out).contains(String.format("Backup schedule: %s", backupScheduleName1)); + assertThat(out).contains(String.format("Backup schedule: %s", backupScheduleName2)); + } +} diff --git a/samples/snippets/src/test/java/com/example/spanner/ProtoColumnSampleIT.java b/samples/snippets/src/test/java/com/example/spanner/ProtoColumnSampleIT.java new file mode 100644 index 00000000000..ba59f7b2384 --- /dev/null +++ b/samples/snippets/src/test/java/com/example/spanner/ProtoColumnSampleIT.java @@ -0,0 +1,129 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static com.example.spanner.SampleRunner.runSample; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.DatabaseAdminClient; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.common.collect.ImmutableList; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Integration tests for Proto Column type */ +@RunWith(JUnit4.class) +public class ProtoColumnSampleIT extends SampleTestBase { + + private static DatabaseId databaseId; + private static Spanner spanner; + + @BeforeClass + public static void createTestDatabase() throws Exception { + spanner = SpannerOptions.newBuilder().setProjectId(projectId).build().getService(); + DatabaseAdminClient databaseAdminClient = spanner.getDatabaseAdminClient(); + final String database = idGenerator.generateDatabaseId(); + databaseAdminClient + .createDatabase( + instanceId, + database, + ImmutableList.of( + "CREATE TABLE Singers (\n" + + " SingerId INT64 NOT NULL,\n" + + " FirstName STRING(1024),\n" + + " LastName STRING(1024),\n" + + " ) PRIMARY KEY (SingerId)")) + .get(10, TimeUnit.MINUTES); + databaseId = DatabaseId.of(projectId, instanceId, database); + + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write( + Arrays.asList( + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(1L) + .set("FirstName") + .to("Marc") + .set("LastName") + .to("Richards") + .build(), + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(2L) + 
.set("FirstName") + .to("Catalina") + .set("LastName") + .to("Smith") + .build(), + Mutation.newInsertOrUpdateBuilder("Singers") + .set("SingerId") + .to(3L) + .set("FirstName") + .to("Alice") + .set("LastName") + .to("Trentor") + .build())); + } + + @After + public void removeTestData() { + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + client.write(Collections.singletonList(Mutation.delete("Singers", KeySet.all()))); + } + + @Test + public void testProtoColumns() throws Exception { + InputStream in = + ProtoColumnSampleIT.class + .getClassLoader() + .getResourceAsStream("com/example/spanner/descriptors.pb"); + System.out.println(in); + final DatabaseClient client = spanner.getDatabaseClient(databaseId); + System.out.println(databaseId.toString()); + System.out.println("Adding Proto columns schema to table ..."); + String out = + runSample( + () -> + AddProtoColumnSample.addProtoColumn( + projectId, instanceId, databaseId.getDatabase())); + assertTrue(out.contains("Added Proto columns")); + + System.out.println("Update data with Proto Columns ..."); + out = runSample(() -> UpdateProtoDataSample.updateProtoData(client)); + assertTrue(out.contains("Data updated")); + + System.out.println("Update data with Proto Columns using DML ..."); + out = runSample(() -> UpdateProtoDataSampleUsingDml.updateProtoDataUsingDml(client)); + assertTrue(out.contains("record(s) updated")); + + System.out.println("Query data with Proto Columns ..."); + out = runSample(() -> QueryWithProtoParameterSample.queryWithProtoParameter(client)); + assertTrue(out.contains("2 singer_id: 2")); + } +} diff --git a/samples/snippets/src/test/java/com/example/spanner/SpannerGraphSampleIT.java b/samples/snippets/src/test/java/com/example/spanner/SpannerGraphSampleIT.java new file mode 100644 index 00000000000..6f778de49ab --- /dev/null +++ b/samples/snippets/src/test/java/com/example/spanner/SpannerGraphSampleIT.java @@ -0,0 +1,109 @@ +/* + * Copyright 2024 Google Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** Unit tests for {@code SpannerGraphSample} */ +@RunWith(JUnit4.class) +@SuppressWarnings("checkstyle:abbreviationaswordinname") +public class SpannerGraphSampleIT extends SampleTestBaseV2 { + + private static final int DBID_LENGTH = 20; + // The instance needs to exist for tests to pass. 
+ private static final String instanceId = System.getProperty("spanner.test.instance"); + private static final String baseDbId = System.getProperty("spanner.sample.database"); + static Spanner spanner; + static DatabaseAdminClient databaseAdminClient; + + private String runSample(String command, String databaseId) throws Exception { + System.out.println("Running " + command + " on " + instanceId + ":" + databaseId); + PrintStream stdOut = System.out; + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(bout); + System.setOut(out); + SpannerGraphSample.main(new String[] {command, instanceId, databaseId}); + System.setOut(stdOut); + return bout.toString(); + } + + @Test + public void testSample() throws Exception { + String databaseId = idGenerator.generateDatabaseId(); + assertThat(instanceId).isNotNull(); + assertThat(databaseId).isNotNull(); + + System.out.println("Create database with property graph ..."); + String out = runSample("createdatabase", databaseId); + + DatabaseId dbId = DatabaseId.of(projectId, instanceId, databaseId); + assertThat(out).contains("Created database"); + assertThat(out).contains(dbId.getName()); + + System.out.println("Insert some data ..."); + out = runSample("insert", databaseId); + + System.out.println("Insert more data using DML ..."); + out = runSample("insertusingdml", databaseId); + assertThat(out).contains("2 record(s) inserted into Account."); + assertThat(out).contains("2 record(s) inserted into AccountTransferAccount."); + + System.out.println("Update some data using DML ..."); + out = runSample("updateusingdml", databaseId); + assertThat(out).contains("1 Account record(s) updated."); + assertThat(out).contains("1 AccountTransferAccount record(s) updated."); + + System.out.println("Update some data using a graph query in DML ..."); + out = runSample("updateusinggraphqueryindml", databaseId); + assertThat(out).contains("2 Account record(s) updated."); + + System.out.println("Query the 
property graph ..."); + out = runSample("query", databaseId); + assertThat(out).contains("Dana Alex 500.0"); + assertThat(out).contains("Lee Dana 300.0"); + assertThat(out).contains("Alex Lee 300.0"); + assertThat(out).contains("Alex Lee 100.0"); + assertThat(out).contains("Dana Lee 200.0"); + + System.out.println("Query the property graph with a parameter ..."); + out = runSample("querywithparameter", databaseId); + assertThat(out).contains("Dana Alex 500.0"); + + System.out.println("Delete some data using DML ..."); + out = runSample("deleteusingdml", databaseId); + assertThat(out).contains("1 Account record(s) deleted."); + + System.out.println("Delete the remaining data in the database ..."); + out = runSample("delete", databaseId); + assertThat(out).contains("Records deleted."); + + System.out.println("Query the property graph ..."); + out = runSample("query", databaseId); + assertThat(out).doesNotContain("Dana"); + assertThat(out).doesNotContain("Alex"); + assertThat(out).doesNotContain("Lee"); + } +} diff --git a/samples/snippets/src/test/java/com/example/spanner/SpannerSampleIT.java b/samples/snippets/src/test/java/com/example/spanner/SpannerSampleIT.java index 422d7618deb..e0da1ca67f4 100644 --- a/samples/snippets/src/test/java/com/example/spanner/SpannerSampleIT.java +++ b/samples/snippets/src/test/java/com/example/spanner/SpannerSampleIT.java @@ -54,9 +54,7 @@ import org.junit.runner.RunWith; import org.junit.runners.JUnit4; -/** - * Unit tests for {@code SpannerSample} - */ +/** Unit tests for {@code SpannerSample} */ @RunWith(JUnit4.class) @SuppressWarnings("checkstyle:abbreviationaswordinname") public class SpannerSampleIT extends SampleTestBaseV2 { @@ -84,7 +82,7 @@ private String runSample(String command, String databaseId) throws Exception { ByteArrayOutputStream bout = new ByteArrayOutputStream(); PrintStream out = new PrintStream(bout); System.setOut(out); - SpannerSample.main(new String[]{command, instanceId, databaseId, null}); + 
SpannerSample.main(new String[] {command, instanceId, databaseId, null}); System.setOut(stdOut); return bout.toString(); } @@ -94,7 +92,7 @@ private String runSample(String command, String databaseId, String backupId) thr ByteArrayOutputStream bout = new ByteArrayOutputStream(); PrintStream out = new PrintStream(bout); System.setOut(out); - SpannerSample.main(new String[]{command, instanceId, databaseId, backupId}); + SpannerSample.main(new String[] {command, instanceId, databaseId, backupId}); System.setOut(stdOut); return bout.toString(); } @@ -147,17 +145,20 @@ static void deleteStaleTestDatabases() throws IOException { Pattern samplePattern = getTestDbIdPattern(SpannerSampleIT.baseDbId); Pattern restoredPattern = getTestDbIdPattern("restored"); try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) { - for (Database db : databaseAdminClient.listDatabases(InstanceName.of(projectId, instanceId)) - .iterateAll()) { + for (Database db : + databaseAdminClient.listDatabases(InstanceName.of(projectId, instanceId)).iterateAll()) { DatabaseName databaseName = DatabaseName.parse(db.getName()); - if (TimeUnit.HOURS.convert(now.getSeconds() - db.getCreateTime().getSeconds(), - TimeUnit.SECONDS) > 24) { + if (TimeUnit.HOURS.convert( + now.getSeconds() - db.getCreateTime().getSeconds(), TimeUnit.SECONDS) + > 24) { if (databaseName.getDatabase().length() >= DBID_LENGTH) { - if (samplePattern.matcher( - toComparableId(SpannerSampleIT.baseDbId, databaseName.getDatabase())).matches()) { + if (samplePattern + .matcher(toComparableId(SpannerSampleIT.baseDbId, databaseName.getDatabase())) + .matches()) { databaseAdminClient.dropDatabase(db.getName()); } - if (restoredPattern.matcher(toComparableId("restored", databaseName.getDatabase())) + if (restoredPattern + .matcher(toComparableId("restored", databaseName.getDatabase())) .matches()) { databaseAdminClient.dropDatabase(db.getName()); } @@ -395,11 +396,11 @@ public void testBackupSamples_withoutEncryption() { 
try { System.out.println("List Backup Operations ..."); out = runSample("listbackupoperations", databaseId, backupId); - assertThat(out).contains( - String.format( - "Backup %s on database %s pending:", backupName, dbId.getName())); - assertTrue("Out does not contain copy backup operations", out.contains( - "Copy Backup Operations")); + assertThat(out) + .contains( + String.format("Backup %s on database %s pending:", backupName, dbId.getName())); + assertTrue( + "Out does not contain copy backup operations", out.contains("Copy Backup Operations")); } catch (SpannerException e) { assertThat(e.getErrorCode()).isEqualTo(ErrorCode.INVALID_ARGUMENT); assertThat(e.getMessage()).contains("Cannot evaluate filter expression"); @@ -408,16 +409,16 @@ public void testBackupSamples_withoutEncryption() { System.out.println("List Backup ..."); out = runSample("listbackups", databaseId, backupId); assertThat(out).contains("All backups:"); - assertThat(out).contains( - String.format("All backups with backup name containing \"%s\":", backupId)); - assertThat(out).contains(String.format( - "All backups for databases with a name containing \"%s\":", - dbId.getDatabase())); - assertThat(out).contains( - String.format("All backups that expire before")); + assertThat(out) + .contains(String.format("All backups with backup name containing \"%s\":", backupId)); + assertThat(out) + .contains( + String.format( + "All backups for databases with a name containing \"%s\":", dbId.getDatabase())); + assertThat(out).contains(String.format("All backups that expire before")); assertThat(out).contains("All backups with size greater than 100 bytes:"); - assertThat(out).containsMatch( - Pattern.compile("All databases created after (.+) and that are ready:")); + assertThat(out) + .containsMatch(Pattern.compile("All databases created after (.+) and that are ready:")); assertThat(out).contains("All backups, listed using pagination:"); // All the above tests should include the created backup exactly once, 
i.e. exactly 6 times. assertThat(countOccurrences(out, backupName.toString())).isEqualTo(6); @@ -431,18 +432,19 @@ public void testBackupSamples_withoutEncryption() { try { System.out.println("Restore Backup ..."); out = runSample("restorebackup", restoreDatabaseId, backupId); - assertThat(out).contains( - "Restored database [" - + DatabaseName.of(projectId, instanceId, restoreDatabaseId).toString() - + "] from [" - + backupName - + "]"); + assertThat(out) + .contains( + "Restored database [" + + DatabaseName.of(projectId, instanceId, restoreDatabaseId).toString() + + "] from [" + + backupName + + "]"); restored = true; break; } catch (SpannerException e) { if (e.getErrorCode() == ErrorCode.FAILED_PRECONDITION && e.getMessage() - .contains("Please retry the operation once the pending restores complete")) { + .contains("Please retry the operation once the pending restores complete")) { restoreAttempts++; if (restoreAttempts == 10) { System.out.println( @@ -460,22 +462,22 @@ public void testBackupSamples_withoutEncryption() { if (restored) { System.out.println("List Database Operations ..."); out = runSample("listdatabaseoperations", restoreDatabaseId); - assertThat(out).contains( - String.format( - "Database %s restored from backup", - DatabaseId.of(dbId.getInstanceId(), restoreDatabaseId).getName())); + assertThat(out) + .contains( + String.format( + "Database %s restored from backup", + DatabaseId.of(dbId.getInstanceId(), restoreDatabaseId).getName())); } System.out.println("Updating backup ..."); out = runSample("updatebackup", databaseId, backupId); - assertThat(out).contains( - String.format("Updated backup [" + backupId + "]")); + assertThat(out).contains(String.format("Updated backup [" + backupId + "]")); // Drop the restored database before we try to delete the backup. // Otherwise the delete backup operation might fail as the backup is still in use by // the OptimizeRestoredDatabase operation. 
- databaseAdminClient.dropDatabase(DatabaseName.of(projectId, - dbId.getInstanceId().getInstance(), restoreDatabaseId)); + databaseAdminClient.dropDatabase( + DatabaseName.of(projectId, dbId.getInstanceId().getInstance(), restoreDatabaseId)); System.out.println("Deleting Backup ..."); out = runSample("deletebackup", databaseId, backupId); @@ -502,8 +504,7 @@ public void testCancelBackupSamples() { String backupId = idGenerator.generateBackupId(); out = runSample("cancelcreatebackup", databaseId, backupId); - assertThat(out).contains( - "Backup operation for [" + backupId + "_cancel] successfully"); + assertThat(out).contains("Backup operation for [" + backupId + "_cancel] successfully"); } catch (Exception ex) { Assert.fail("Exception raised => " + ex.getCause()); } @@ -519,38 +520,70 @@ public void testEncryptedDatabaseAndBackupSamples() throws Exception { String instanceId = idGenerator.generateInstanceId(); InstanceAdminClient instanceAdminClient = spanner.getInstanceAdminClient(); instanceAdminClient - .createInstance(InstanceInfo.newBuilder(InstanceId.of(projectId, instanceId)) - .setDisplayName("Encrypted test instance") - .setInstanceConfigId(InstanceConfigId.of(projectId, "regional-" + keyLocation)) - .setNodeCount(1).build()) + .createInstance( + InstanceInfo.newBuilder(InstanceId.of(projectId, instanceId)) + .setDisplayName("Encrypted test instance") + .setInstanceConfigId(InstanceConfigId.of(projectId, "regional-" + keyLocation)) + .setNodeCount(1) + .build()) .get(); try { - String out = SampleRunner - .runSample(() -> SpannerSample.createDatabase( - databaseAdminClient, InstanceName.of(projectId, instanceId), databaseId)); - assertThat(out).contains(String.format( - "Created database [%s]", DatabaseName.of(projectId, instanceId, databaseId))); - - out = SampleRunner.runSampleWithRetry( - () -> CreateBackupWithEncryptionKey.createBackupWithEncryptionKey(databaseAdminClient, - projectId, - instanceId, databaseId, encryptedBackupId, key), - new 
ShouldRetryBackupOperation()); - assertThat(out).containsMatch(String.format( - "Backup projects/%s/instances/%s/backups/%s of size \\d+ bytes " - + "was created at (.*) using encryption key %s", - projectId, instanceId, encryptedBackupId, key)); - - out = SampleRunner.runSampleWithRetry( - () -> RestoreBackupWithEncryptionKey.restoreBackupWithEncryptionKey(databaseAdminClient, - projectId, instanceId, encryptedBackupId, restoreId, key), - new ShouldRetryBackupOperation()); - assertThat(out).contains(String.format( - "Database projects/%s/instances/%s/databases/%s" - + " restored to projects/%s/instances/%s/databases/%s" - + " from backup projects/%s/instances/%s/backups/%s" + " using encryption key %s", - projectId, instanceId, databaseId, projectId, instanceId, restoreId, - projectId, instanceId, encryptedBackupId, key)); + String out = + SampleRunner.runSample( + () -> + SpannerSample.createDatabase( + databaseAdminClient, InstanceName.of(projectId, instanceId), databaseId)); + assertThat(out) + .contains( + String.format( + "Created database [%s]", DatabaseName.of(projectId, instanceId, databaseId))); + + out = + SampleRunner.runSampleWithRetry( + () -> + CreateBackupWithEncryptionKey.createBackupWithEncryptionKey( + databaseAdminClient, + projectId, + instanceId, + databaseId, + encryptedBackupId, + key), + new ShouldRetryBackupOperation()); + assertThat(out) + .containsMatch( + String.format( + "Backup projects/%s/instances/%s/backups/%s of size \\d+ bytes " + + "was created at (.*) using encryption key %s", + projectId, instanceId, encryptedBackupId, key)); + + out = + SampleRunner.runSampleWithRetry( + () -> + RestoreBackupWithEncryptionKey.restoreBackupWithEncryptionKey( + databaseAdminClient, + projectId, + instanceId, + encryptedBackupId, + restoreId, + key), + new ShouldRetryBackupOperation()); + assertThat(out) + .contains( + String.format( + "Database projects/%s/instances/%s/databases/%s" + + " restored to projects/%s/instances/%s/databases/%s" + + " 
from backup projects/%s/instances/%s/backups/%s" + + " using encryption key %s", + projectId, + instanceId, + databaseId, + projectId, + instanceId, + restoreId, + projectId, + instanceId, + encryptedBackupId, + key)); } finally { // Delete the backups from the test instance first, as the instance can only be deleted once // all backups have been deleted. @@ -566,20 +599,28 @@ public void testDeleteBackups() { String databaseId = idGenerator.generateDatabaseId(); String backupId = idGenerator.generateBackupId(); - String out = SampleRunner - .runSample(() -> SpannerSample.createDatabase( - databaseAdminClient, InstanceName.of(projectId, instanceId), databaseId)); - assertThat(out).contains(String.format( - "Created database [%s]", DatabaseName.of(projectId, instanceId, databaseId))); - - out = SampleRunner.runSampleWithRetry( - () -> CreateBackupWithEncryptionKey.createBackupWithEncryptionKey(databaseAdminClient, - projectId, instanceId, databaseId, backupId, key), - new ShouldRetryBackupOperation()); - assertThat(out).containsMatch(String.format( - "Backup projects/%s/instances/%s/backups/%s of size \\d+ bytes " - + "was created at (.*) using encryption key %s", - projectId, instanceId, backupId, key)); + String out = + SampleRunner.runSample( + () -> + SpannerSample.createDatabase( + databaseAdminClient, InstanceName.of(projectId, instanceId), databaseId)); + assertThat(out) + .contains( + String.format( + "Created database [%s]", DatabaseName.of(projectId, instanceId, databaseId))); + + out = + SampleRunner.runSampleWithRetry( + () -> + CreateBackupWithEncryptionKey.createBackupWithEncryptionKey( + databaseAdminClient, projectId, instanceId, databaseId, backupId, key), + new ShouldRetryBackupOperation()); + assertThat(out) + .containsMatch( + String.format( + "Backup projects/%s/instances/%s/backups/%s of size \\d+ bytes " + + "was created at (.*) using encryption key %s", + projectId, instanceId, backupId, key)); out = runSample("deletebackup", databaseId, 
backupId); assertThat(out).contains("Deleted backup [" + backupId + "]"); @@ -598,9 +639,11 @@ private static void deleteAllBackups(String instanceId) throws InterruptedExcept databaseAdminClient.deleteBackup(backup.getName()); break; } catch (SpannerException e) { - if (e.getErrorCode() == ErrorCode.FAILED_PRECONDITION && e.getMessage() - .contains("Please try deleting the backup once the restore or post-restore optimize " - + "operations have completed on these databases.")) { + if (e.getErrorCode() == ErrorCode.FAILED_PRECONDITION + && e.getMessage() + .contains( + "Please try deleting the backup once the restore or post-restore optimize " + + "operations have completed on these databases.")) { // Wait 30 seconds and then retry. Thread.sleep(30_000L); } else { @@ -622,25 +665,31 @@ private String runSampleRunnable(Runnable sample) { } @Test - public void testCreateInstanceSample() { + public void testCreateAndUpdateInstanceSample() { String databaseId = idGenerator.generateDatabaseId(); DatabaseId dbId = DatabaseId.of(projectId, instanceId, databaseId); String instanceId = formatForTest("sample-inst"); String out = - runSampleRunnable(() -> { - try { - CreateInstanceExample.createInstance( - dbId.getInstanceId().getProject(), instanceId); - } finally { - spanner.getInstanceAdminClient().deleteInstance(instanceId); - } - }); + runSampleRunnable( + () -> { + try { + CreateInstanceExample.createInstance(dbId.getInstanceId().getProject(), instanceId); + UpdateInstanceExample.updateInstance(dbId.getInstanceId().getProject(), instanceId); + } finally { + spanner.getInstanceAdminClient().deleteInstance(instanceId); + } + }); assertThat(out) .contains( String.format( "Instance %s was successfully created", InstanceId.of(dbId.getInstanceId().getProject(), instanceId))); + assertThat(out) + .contains( + String.format( + "Instance %s was successfully updated", + InstanceId.of(dbId.getInstanceId().getProject(), instanceId))); } private static int countOccurrences(String 
input, String search) { @@ -676,9 +725,12 @@ public boolean test(SpannerException e) { attempts++; if (attempts == MAX_ATTEMPTS) { // Throw custom exception so it is easier to locate in the log why it went wrong. - throw SpannerExceptionFactory.newSpannerException(ErrorCode.DEADLINE_EXCEEDED, - String.format("Operation failed %d times because of other pending operations. " - + "Giving up operation.\n", attempts), + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.DEADLINE_EXCEEDED, + String.format( + "Operation failed %d times because of other pending operations. " + + "Giving up operation.\n", + attempts), e); } // Wait one minute before retrying. diff --git a/samples/snippets/src/test/java/com/example/spanner/UpdateBackupScheduleSampleIT.java b/samples/snippets/src/test/java/com/example/spanner/UpdateBackupScheduleSampleIT.java new file mode 100644 index 00000000000..ea299571f5d --- /dev/null +++ b/samples/snippets/src/test/java/com/example/spanner/UpdateBackupScheduleSampleIT.java @@ -0,0 +1,52 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.spanner.admin.database.v1.BackupScheduleName; +import java.util.UUID; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class UpdateBackupScheduleSampleIT extends SampleTestBaseV2 { + // Default instance and given db should exist for tests to pass. + private static String databaseId = System.getProperty("spanner.sample.database", "mysample"); + + @Test + public void testUpdateBackupScheduleSample() throws Exception { + String backupScheduleId = String.format("schedule-%s", UUID.randomUUID()); + BackupScheduleName backupScheduleName = + BackupScheduleName.of(projectId, instanceId, databaseId, backupScheduleId); + String out = + SampleRunner.runSample( + () -> { + try { + CreateFullBackupScheduleSample.createFullBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + UpdateBackupScheduleSample.updateBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } finally { + DeleteBackupScheduleSample.deleteBackupSchedule( + projectId, instanceId, databaseId, backupScheduleId); + } + }); + assertThat(out).contains(String.format("Updated backup schedule: %s", backupScheduleName)); + } +} diff --git a/versions.txt b/versions.txt index 61c5474879e..56e90cc4618 100644 --- a/versions.txt +++ b/versions.txt @@ -1,13 +1,13 @@ # Format: # module:released-version:current-version -proto-google-cloud-spanner-admin-instance-v1:6.66.0:6.66.1-SNAPSHOT -proto-google-cloud-spanner-v1:6.66.0:6.66.1-SNAPSHOT -proto-google-cloud-spanner-admin-database-v1:6.66.0:6.66.1-SNAPSHOT -grpc-google-cloud-spanner-v1:6.66.0:6.66.1-SNAPSHOT -grpc-google-cloud-spanner-admin-instance-v1:6.66.0:6.66.1-SNAPSHOT -grpc-google-cloud-spanner-admin-database-v1:6.66.0:6.66.1-SNAPSHOT -google-cloud-spanner:6.66.0:6.66.1-SNAPSHOT -google-cloud-spanner-executor:6.66.0:6.66.1-SNAPSHOT 
-proto-google-cloud-spanner-executor-v1:6.66.0:6.66.1-SNAPSHOT -grpc-google-cloud-spanner-executor-v1:6.66.0:6.66.1-SNAPSHOT +proto-google-cloud-spanner-admin-instance-v1:6.78.0:6.78.1-SNAPSHOT +proto-google-cloud-spanner-v1:6.78.0:6.78.1-SNAPSHOT +proto-google-cloud-spanner-admin-database-v1:6.78.0:6.78.1-SNAPSHOT +grpc-google-cloud-spanner-v1:6.78.0:6.78.1-SNAPSHOT +grpc-google-cloud-spanner-admin-instance-v1:6.78.0:6.78.1-SNAPSHOT +grpc-google-cloud-spanner-admin-database-v1:6.78.0:6.78.1-SNAPSHOT +google-cloud-spanner:6.78.0:6.78.1-SNAPSHOT +google-cloud-spanner-executor:6.78.0:6.78.1-SNAPSHOT +proto-google-cloud-spanner-executor-v1:6.78.0:6.78.1-SNAPSHOT +grpc-google-cloud-spanner-executor-v1:6.78.0:6.78.1-SNAPSHOT