com.google.cloud
google-cloud-spanner
diff --git a/google-cloud-spanner-executor/assembly-descriptor.xml b/google-cloud-spanner-executor/assembly-descriptor.xml
new file mode 100644
index 00000000000..8a9e7f8f500
--- /dev/null
+++ b/google-cloud-spanner-executor/assembly-descriptor.xml
@@ -0,0 +1,27 @@
+<assembly xmlns="http://maven.apache.org/ASSEMBLY/2.1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/ASSEMBLY/2.1.0 http://maven.apache.org/xsd/assembly-2.1.0.xsd">
+  <id>jar-with-dependencies</id>
+  <formats>
+    <format>jar</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <dependencySets>
+    <dependencySet>
+      <outputDirectory>/</outputDirectory>
+      <useProjectArtifact>false</useProjectArtifact>
+      <unpack>true</unpack>
+      <unpackOptions>
+        <excludes>
+          <exclude>io.grpc.LoadBalancerProvider</exclude>
+        </excludes>
+      </unpackOptions>
+    </dependencySet>
+  </dependencySets>
+  <fileSets>
+    <fileSet>
+      <directory>${project.build.outputDirectory}</directory>
+      <outputDirectory>.</outputDirectory>
+    </fileSet>
+  </fileSets>
+</assembly>
diff --git a/google-cloud-spanner-executor/pom.xml b/google-cloud-spanner-executor/pom.xml
new file mode 100644
index 00000000000..70c3dd482c3
--- /dev/null
+++ b/google-cloud-spanner-executor/pom.xml
@@ -0,0 +1,165 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>com.google.cloud</groupId>
+  <artifactId>google-cloud-spanner-executor</artifactId>
+  <version>6.34.2-SNAPSHOT</version>
+  <packaging>jar</packaging>
+  <name>Google Cloud Spanner Executor</name>
+
+  <parent>
+    <groupId>com.google.cloud</groupId>
+    <artifactId>google-cloud-spanner-parent</artifactId>
+    <version>6.34.2-SNAPSHOT</version>
+  </parent>
+
+  <properties>
+    <maven.compiler.source>1.8</maven.compiler.source>
+    <maven.compiler.target>1.8</maven.compiler.target>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.google.cloud</groupId>
+      <artifactId>google-cloud-spanner</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.grpc</groupId>
+      <artifactId>grpc-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.grpc</groupId>
+      <artifactId>grpc-netty-shaded</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.grpc</groupId>
+      <artifactId>grpc-stub</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api</groupId>
+      <artifactId>api-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api.grpc</groupId>
+      <artifactId>proto-google-common-protos</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.cloud</groupId>
+      <artifactId>google-cloud-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.auth</groupId>
+      <artifactId>google-auth-library-oauth2-http</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.http-client</groupId>
+      <artifactId>google-http-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api.grpc</groupId>
+      <artifactId>proto-google-cloud-spanner-admin-instance-v1</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api.grpc</groupId>
+      <artifactId>proto-google-cloud-spanner-v1</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api.grpc</groupId>
+      <artifactId>proto-google-cloud-spanner-admin-database-v1</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api.grpc</groupId>
+      <artifactId>proto-google-cloud-spanner-executor-v1</artifactId>
+      <version>1.0.0</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api</groupId>
+      <artifactId>gax</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api</groupId>
+      <artifactId>gax-grpc</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.threeten</groupId>
+      <artifactId>threetenbp</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.code.findbugs</groupId>
+      <artifactId>jsr305</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.auth</groupId>
+      <artifactId>google-auth-library-credentials</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.grpc</groupId>
+      <artifactId>grpc-services</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-cli</groupId>
+      <artifactId>commons-cli</artifactId>
+      <version>1.5.0</version>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+      <version>2.11.0</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.jetbrains</groupId>
+      <artifactId>annotations</artifactId>
+      <version>RELEASE</version>
+      <scope>compile</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <finalName>google-spanner-cloud-executor</finalName>
+    <plugins>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <version>3.4.2</version>
+        <configuration>
+          <descriptors>
+            <descriptor>assembly-descriptor.xml</descriptor>
+          </descriptors>
+          <archive>
+            <manifest>
+              <mainClass>com.google.cloud.executor.spanner.WorkerProxy</mainClass>
+            </manifest>
+          </archive>
+        </configuration>
+        <executions>
+          <execution>
+            <id>make-assembly</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-failsafe-plugin</artifactId>
+        <version>3.0.0-M7</version>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudClientExecutor.java b/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudClientExecutor.java
new file mode 100644
index 00000000000..91ce0a23e2e
--- /dev/null
+++ b/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudClientExecutor.java
@@ -0,0 +1,3477 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.executor.spanner;
+
+import static com.google.cloud.spanner.TransactionRunner.TransactionCallable;
+
+import com.google.api.gax.longrunning.OperationFuture;
+import com.google.api.gax.paging.Page;
+import com.google.api.gax.retrying.RetrySettings;
+import com.google.api.gax.rpc.DeadlineExceededException;
+import com.google.api.gax.rpc.TransportChannelProvider;
+import com.google.api.gax.rpc.UnavailableException;
+import com.google.auth.Credentials;
+import com.google.auth.oauth2.GoogleCredentials;
+import com.google.cloud.ByteArray;
+import com.google.cloud.Date;
+import com.google.cloud.NoCredentials;
+import com.google.cloud.Timestamp;
+import com.google.cloud.spanner.Backup;
+import com.google.cloud.spanner.BatchClient;
+import com.google.cloud.spanner.BatchReadOnlyTransaction;
+import com.google.cloud.spanner.BatchTransactionId;
+import com.google.cloud.spanner.Database;
+import com.google.cloud.spanner.DatabaseAdminClient;
+import com.google.cloud.spanner.DatabaseClient;
+import com.google.cloud.spanner.DatabaseId;
+import com.google.cloud.spanner.ErrorCode;
+import com.google.cloud.spanner.Instance;
+import com.google.cloud.spanner.InstanceAdminClient;
+import com.google.cloud.spanner.InstanceConfig;
+import com.google.cloud.spanner.InstanceConfigId;
+import com.google.cloud.spanner.InstanceConfigInfo;
+import com.google.cloud.spanner.InstanceId;
+import com.google.cloud.spanner.InstanceInfo;
+import com.google.cloud.spanner.Key;
+import com.google.cloud.spanner.KeyRange;
+import com.google.cloud.spanner.KeySet;
+import com.google.cloud.spanner.Mutation;
+import com.google.cloud.spanner.Mutation.WriteBuilder;
+import com.google.cloud.spanner.Options;
+import com.google.cloud.spanner.Partition;
+import com.google.cloud.spanner.PartitionOptions;
+import com.google.cloud.spanner.ReadContext;
+import com.google.cloud.spanner.ReadOnlyTransaction;
+import com.google.cloud.spanner.ReplicaInfo;
+import com.google.cloud.spanner.ResultSet;
+import com.google.cloud.spanner.Spanner;
+import com.google.cloud.spanner.SpannerException;
+import com.google.cloud.spanner.SpannerExceptionFactory;
+import com.google.cloud.spanner.SpannerOptions;
+import com.google.cloud.spanner.Statement;
+import com.google.cloud.spanner.Struct;
+import com.google.cloud.spanner.StructReader;
+import com.google.cloud.spanner.TimestampBound;
+import com.google.cloud.spanner.TransactionContext;
+import com.google.cloud.spanner.TransactionRunner;
+import com.google.cloud.spanner.Type;
+import com.google.cloud.spanner.Value;
+import com.google.cloud.spanner.encryption.CustomerManagedEncryption;
+import com.google.cloud.spanner.v1.stub.SpannerStubSettings;
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.longrunning.Operation;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.util.Timestamps;
+import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata;
+import com.google.spanner.admin.instance.v1.Instance.State;
+import com.google.spanner.executor.v1.AdminAction;
+import com.google.spanner.executor.v1.AdminResult;
+import com.google.spanner.executor.v1.BatchDmlAction;
+import com.google.spanner.executor.v1.BatchPartition;
+import com.google.spanner.executor.v1.CancelOperationAction;
+import com.google.spanner.executor.v1.ChangeStreamRecord;
+import com.google.spanner.executor.v1.ChildPartitionsRecord;
+import com.google.spanner.executor.v1.CloseBatchTransactionAction;
+import com.google.spanner.executor.v1.CloudBackupResponse;
+import com.google.spanner.executor.v1.CloudDatabaseResponse;
+import com.google.spanner.executor.v1.CloudInstanceConfigResponse;
+import com.google.spanner.executor.v1.CloudInstanceResponse;
+import com.google.spanner.executor.v1.Concurrency;
+import com.google.spanner.executor.v1.CopyCloudBackupAction;
+import com.google.spanner.executor.v1.CreateCloudBackupAction;
+import com.google.spanner.executor.v1.CreateCloudDatabaseAction;
+import com.google.spanner.executor.v1.CreateCloudInstanceAction;
+import com.google.spanner.executor.v1.CreateUserInstanceConfigAction;
+import com.google.spanner.executor.v1.DataChangeRecord;
+import com.google.spanner.executor.v1.DeleteCloudBackupAction;
+import com.google.spanner.executor.v1.DeleteCloudInstanceAction;
+import com.google.spanner.executor.v1.DeleteUserInstanceConfigAction;
+import com.google.spanner.executor.v1.DmlAction;
+import com.google.spanner.executor.v1.DropCloudDatabaseAction;
+import com.google.spanner.executor.v1.ExecuteChangeStreamQuery;
+import com.google.spanner.executor.v1.ExecutePartitionAction;
+import com.google.spanner.executor.v1.FinishTransactionAction;
+import com.google.spanner.executor.v1.FinishTransactionAction.Mode;
+import com.google.spanner.executor.v1.GenerateDbPartitionsForQueryAction;
+import com.google.spanner.executor.v1.GenerateDbPartitionsForReadAction;
+import com.google.spanner.executor.v1.GetCloudBackupAction;
+import com.google.spanner.executor.v1.GetCloudDatabaseAction;
+import com.google.spanner.executor.v1.GetCloudInstanceAction;
+import com.google.spanner.executor.v1.GetCloudInstanceConfigAction;
+import com.google.spanner.executor.v1.GetOperationAction;
+import com.google.spanner.executor.v1.HeartbeatRecord;
+import com.google.spanner.executor.v1.ListCloudBackupOperationsAction;
+import com.google.spanner.executor.v1.ListCloudBackupsAction;
+import com.google.spanner.executor.v1.ListCloudDatabaseOperationsAction;
+import com.google.spanner.executor.v1.ListCloudDatabasesAction;
+import com.google.spanner.executor.v1.ListCloudInstanceConfigsAction;
+import com.google.spanner.executor.v1.ListCloudInstancesAction;
+import com.google.spanner.executor.v1.MutationAction;
+import com.google.spanner.executor.v1.MutationAction.InsertArgs;
+import com.google.spanner.executor.v1.MutationAction.Mod;
+import com.google.spanner.executor.v1.MutationAction.UpdateArgs;
+import com.google.spanner.executor.v1.OperationResponse;
+import com.google.spanner.executor.v1.QueryAction;
+import com.google.spanner.executor.v1.ReadAction;
+import com.google.spanner.executor.v1.RestoreCloudDatabaseAction;
+import com.google.spanner.executor.v1.SpannerAction;
+import com.google.spanner.executor.v1.SpannerActionOutcome;
+import com.google.spanner.executor.v1.SpannerAsyncActionRequest;
+import com.google.spanner.executor.v1.SpannerAsyncActionResponse;
+import com.google.spanner.executor.v1.StartBatchTransactionAction;
+import com.google.spanner.executor.v1.StartTransactionAction;
+import com.google.spanner.executor.v1.UpdateCloudBackupAction;
+import com.google.spanner.executor.v1.UpdateCloudDatabaseDdlAction;
+import com.google.spanner.executor.v1.UpdateCloudInstanceAction;
+import com.google.spanner.v1.StructType;
+import com.google.spanner.v1.TypeAnnotationCode;
+import com.google.spanner.v1.TypeCode;
+import io.grpc.Status;
+import io.grpc.stub.StreamObserver;
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.math.BigDecimal;
+import java.text.ParseException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executor;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+import javax.annotation.Nullable;
+import org.apache.commons.io.FileUtils;
+import org.jetbrains.annotations.NotNull;
+import org.threeten.bp.Duration;
+import org.threeten.bp.LocalDate;
+
+/**
+ * Implementation of the SpannerExecutorProxy gRPC service that proxies action requests through the
+ * Java Cloud Client.
+ */
+public class CloudClientExecutor extends CloudExecutor {
+
+ private static final Logger LOGGER = Logger.getLogger(CloudClientExecutor.class.getName());
+
+ // Prefix for host address.
+ private static final String HOST_PREFIX = "https://localhost:";
+
+ public CloudClientExecutor(boolean enableGrpcFaultInjector) {
+ this.enableGrpcFaultInjector = enableGrpcFaultInjector;
+ }
+
+ /**
+ * Implementation of a ReadWriteTransaction, which is a wrapper around the cloud TransactionRunner.
+ * It stores all the status and related variables from start to finish, and controls the running
+ * flow of this transaction.
+ *
+ * <p>The following methods should be called on this class externally:
+ *
+ * <ul>
+ * <li>startRWTransaction() initializes a transaction. It creates a callable and runs it with a
+ * ReadWriteTransaction() runner in a separate thread. That callable will accept a
+ * transactionContext when created, and we will pass it out to execute actions on it. Then the
+ * callable will be blocked until we need to exit (e.g. commit) the transaction.
+ * <li>getContext() returns the current ReadWriteTransaction context. Reads and writes can be
+ * performed on that object.
+ * <li>finish() is used to either commit or abandon the transaction. It gets a finishMode from
+ * finishAction and essentially unblocks the separate callable thread that's waiting inside
+ * ReadWriteTransaction(). As a result of this call, Spanner will commit the current transaction,
+ * abandon it without committing, or restart it, in which case the client should get a new
+ * transaction instance using getContext() and replay all the reads and writes through it.
+ * </ul>
+ *
+ * <p>Here's a typical workflow for how a read-write transaction works:
+ *
+ * <p>When we call startRWTransaction, a transaction runner will be started in another thread with
+ * a callable that stores the passed TransactionContext into the ReadWriteTransaction and blocks.
+ * This TransactionContext is used to run the read/write actions. To execute the finish action, we
+ * store the FinishMode in the ReadWriteTransaction object, which unblocks the thread in the
+ * callable and causes the callable to either return (to commit) or throw an exception (to abort).
+ * If the underlying Spanner transaction aborted, the transaction runner will invoke the callable
+ * again.
+ */
+ private static class ReadWriteTransaction {
+ private final DatabaseClient dbClient;
+ private TransactionRunner runner;
+ private TransactionContext txnContext;
+ private com.google.protobuf.Timestamp timestamp;
+ private Mode finishMode;
+ private SpannerException error;
+ private final String transactionSeed;
+ // Set to true when the transaction runner has completed; one of three things has happened: the
+ // runner committed, abandoned the transaction, or threw an error.
+ private boolean runnerCompleted;
+
+ public ReadWriteTransaction(DatabaseClient dbClient, String transactionSeed) {
+ this.dbClient = dbClient;
+ this.transactionSeed = transactionSeed;
+ this.runnerCompleted = false;
+ }
+
+ /** Set context to be used for executing actions. */
+ private synchronized void setContext(TransactionContext transaction) {
+ finishMode = null;
+ txnContext = transaction;
+ Preconditions.checkNotNull(txnContext);
+ LOGGER.log(Level.INFO, "Transaction callable created, setting context %s\n", transactionSeed);
+ notifyAll();
+ }
+
+ /** Wait for finishAction to be executed and return the requested finish mode. */
+ private synchronized Mode waitForFinishAction() throws Exception {
+ while (finishMode == null) {
+ wait();
+ }
+ return finishMode;
+ }
+
+ /** Wait for transactionContext to be set. */
+ private synchronized void waitForTransactionContext() throws Exception {
+ while (txnContext == null && error == null) {
+ wait();
+ }
+ if (error != null) {
+ throw error;
+ }
+ }
+
+ /** Transaction successfully committed with a timestamp. */
+ private synchronized void transactionSucceeded(com.google.protobuf.Timestamp timestamp) {
+ this.timestamp = timestamp;
+ this.runnerCompleted = true;
+ notifyAll();
+ }
+
+ /** Transaction failed to commit: it was either abandoned or another error occurred. */
+ private synchronized void transactionFailed(SpannerException e) {
+ // Handle abandon case
+ if (e.getErrorCode() == ErrorCode.UNKNOWN && e.getMessage().contains(TRANSACTION_ABANDONED)) {
+ LOGGER.log(Level.INFO, "Transaction abandoned");
+ } else {
+ // Store the error for sending back
+ error = e;
+ }
+ this.runnerCompleted = true;
+ notifyAll();
+ }
+
+ /** Return the commit timestamp. */
+ public synchronized com.google.protobuf.Timestamp getTimestamp() {
+ return timestamp;
+ }
+
+ /** Return the transactionContext to run actions. Must be called after start action. */
+ public synchronized TransactionContext getContext() {
+ Preconditions.checkState(txnContext != null);
+ return txnContext;
+ }
+
+ /**
+ * Create a new transaction runner and corresponding transaction callable to start a read-write
+ * transaction.
+ */
+ public void startRWTransaction() throws Exception {
+ final TransactionCallable<Void> callable =
+ transaction -> {
+ setContext(transaction);
+ LOGGER.log(
+ Level.INFO,
+ String.format(
+ "Transaction context set, executing and waiting for finish %s\n",
+ transactionSeed));
+ Mode mode = waitForFinishAction();
+ if (mode == Mode.ABANDON) {
+ throw new Exception(TRANSACTION_ABANDONED);
+ }
+ // Try to commit
+ return null;
+ };
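+ // The runnable drives the TransactionRunner and records the final outcome (commit timestamp
+ // or error) back on this ReadWriteTransaction object.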
+ Runnable runnable =
+ () -> {
+ try {
+ runner = dbClient.readWriteTransaction();
+ LOGGER.log(Level.INFO, String.format("Ready to run callable %s\n", transactionSeed));
+ runner.run(callable);
+ transactionSucceeded(runner.getCommitTimestamp().toProto());
+ } catch (SpannerException e) {
+ LOGGER.log(
+ Level.WARNING,
+ String.format("Transaction runnable failed with exception %s\n", e.getMessage()),
+ e);
+ transactionFailed(e);
+ }
+ };
+ LOGGER.log(
+ Level.INFO,
+ String.format("Callable and Runnable created, ready to execute %s\n", transactionSeed));
+ txnThreadPool.execute(runnable);
+ waitForTransactionContext();
+ LOGGER.log(
+ Level.INFO,
+ String.format("Transaction successfully created and running %s\n", transactionSeed));
+ }
+
+ /**
+ * Finish the current transaction in the given finishMode; if it failed, throw the exception back
+ * to the caller. Returns true if the transaction completed (i.e., committed), false if it was
+ * restarted by the transaction runner.
+ */
+ public synchronized boolean finish(Mode finishMode) throws Exception {
+ switch (finishMode) {
+ case COMMIT:
+ case ABANDON:
+ // Signal that finish action has been called and finishMode has been set.
+ this.finishMode = finishMode;
+ Preconditions.checkNotNull(finishMode);
+ txnContext = null;
+ LOGGER.log(
+ Level.INFO,
+ String.format(
+ "TxnContext cleared, sending finishMode to finish transaction %s\n",
+ transactionSeed));
+ notifyAll();
+ // Wait for the transaction to finish or restart
+ while (txnContext == null && !runnerCompleted) {
+ wait();
+ }
+ LOGGER.log(
+ Level.INFO,
+ String.format("Transaction finished, getting back to caller %s\n", transactionSeed));
+ if (txnContext != null) {
+ // Transaction restarted
+ return false;
+ } else if (error != null) {
+ // Transaction runner threw an exception: re-throw it to the client.
+ // Filter expected errors
+ if (error.getErrorCode() == ErrorCode.UNKNOWN
+ && error.getMessage().contains("Transaction outcome unknown")) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.DEADLINE_EXCEEDED, "Transaction outcome unknown.");
+ } else {
+ throw error;
+ }
+ }
+ // Transaction successfully completed
+ return true;
+ default:
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unsupported finish mode: " + finishMode);
+ }
+ }
+ }
+
+ /**
+ * All the context in which SpannerActions are executed. It stores the currently running
+ * transaction and table metadata, shared by all the action executors and protected by a lock.
+ * There is exactly one instance of this class per stubby call, created when the executor is
+ * initialized.
+ */
+ class ExecutionFlowContext {
+ // Database path from previous action
+ private String prevDbPath;
+ // Current read-write transaction
+ private ReadWriteTransaction rwTxn;
+ // Current read-only transaction
+ private ReadOnlyTransaction roTxn;
+ // Current batch read-only transaction
+ private BatchReadOnlyTransaction batchTxn;
+ // Current database client
+ private DatabaseClient dbClient;
+ // Metadata info about table columns
+ private Metadata metadata;
+ // Number of pending read/query actions.
+ private int numPendingReads;
+ // Indicates whether a read/query action was aborted and the transaction needs to be reset.
+ private boolean readAborted;
+ // Log the workid and op pair for tracing the thread.
+ private String transactionSeed;
+ // Outgoing stream.
+ StreamObserver<SpannerAsyncActionResponse> responseObserver;
+
+ public ExecutionFlowContext(StreamObserver<SpannerAsyncActionResponse> responseObserver) {
+ this.responseObserver = responseObserver;
+ }
+
+ /** Call the underlying stream to send response. */
+ public synchronized void onNext(SpannerAsyncActionResponse response) {
+ responseObserver.onNext(response);
+ }
+
+ /** Call the underlying stream to send error. */
+ public synchronized void onError(Throwable t) {
+ responseObserver.onError(t);
+ }
+
+ /** Return the current transaction that can be used for performing read/query actions. */
+ public synchronized ReadContext getTransactionForRead() throws SpannerException {
+ if (roTxn != null) {
+ return roTxn;
+ }
+ if (rwTxn != null) {
+ return rwTxn.getContext();
+ }
+ if (batchTxn != null) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Can't execute regular read in a batch transaction");
+ }
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "No active transaction");
+ }
+
+ /** Return the current transaction that can be used for performing mutation/update actions. */
+ public synchronized TransactionContext getTransactionForWrite() throws SpannerException {
+ if (rwTxn == null) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Not in a read-write transaction");
+ }
+ return rwTxn.getContext();
+ }
+
+ /** Return current batch transaction if it exists. */
+ public synchronized BatchReadOnlyTransaction getBatchTxn() throws SpannerException {
+ if (batchTxn == null) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Not in a batch transaction");
+ }
+ return batchTxn;
+ }
+
+ /** Set the transactionSeed string retrieved from startTransactionAction. */
+ public synchronized void updateTransactionSeed(String transactionSeed) {
+ if (!transactionSeed.isEmpty()) {
+ this.transactionSeed = transactionSeed;
+ }
+ }
+
+ /** Return current workid and op pair for logging. */
+ public synchronized String getTransactionSeed() {
+ return transactionSeed;
+ }
+
+ /** Return current database client. */
+ public DatabaseClient getDbClient() {
+ return dbClient;
+ }
+
+ /** Clear the transaction related variables. */
+ public synchronized void clear() {
+ rwTxn = null;
+ roTxn = null;
+ metadata = null;
+ }
+
+ /** Cleanup all the active transactions if the stubby call is closing. */
+ public synchronized void cleanup() {
+ if (roTxn != null) {
+ LOGGER.log(Level.INFO, "A read only transaction was active when stubby call closed");
+ roTxn.close();
+ }
+ if (rwTxn != null) {
+ LOGGER.log(Level.INFO, "A read write transaction was active when stubby call closed");
+ try {
+ rwTxn.finish(Mode.ABANDON);
+ } catch (Exception e) {
+ LOGGER.log(
+ Level.WARNING, "Failed to abandon a read-write transaction: " + e.getMessage());
+ }
+ }
+ }
+
+ /** Return the previous databasePath if the given dbPath is empty; otherwise remember and return the given dbPath. */
+ public synchronized String getDatabasePath(String dbPath) {
+ if (dbPath == null || dbPath.isEmpty()) {
+ return prevDbPath;
+ }
+ prevDbPath = dbPath;
+ return dbPath;
+ }
+
+ /** Set the metadata for future use. */
+ public synchronized void setMetadata(Metadata metadata) {
+ this.metadata = metadata;
+ }
+
+ /** Start a read-only transaction. */
+ public synchronized void startReadOnlyTxn(
+ DatabaseClient dbClient, TimestampBound timestampBound, Metadata metadata) {
+ if ((rwTxn != null) || (roTxn != null) || (batchTxn != null)) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Already in a transaction");
+ }
+ this.metadata = metadata;
+ if (timestampBound.getMode() == TimestampBound.Mode.MIN_READ_TIMESTAMP
+ || timestampBound.getMode() == TimestampBound.Mode.MAX_STALENESS) {
+ roTxn = dbClient.singleUseReadOnlyTransaction(timestampBound);
+ } else {
+ roTxn = dbClient.readOnlyTransaction(timestampBound);
+ }
+ }
+
+ /** Start a read-write transaction. */
+ public synchronized void startReadWriteTxn(DatabaseClient dbClient, Metadata metadata)
+ throws Exception {
+ if ((rwTxn != null) || (roTxn != null) || (batchTxn != null)) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Already in a transaction");
+ }
+ LOGGER.log(
+ Level.INFO,
+ String.format(
+ "There's no active transaction, safe to create rwTxn: %s\n", getTransactionSeed()));
+ this.metadata = metadata;
+ rwTxn = new ReadWriteTransaction(dbClient, transactionSeed);
+ LOGGER.log(
+ Level.INFO,
+ String.format(
+ "Read-write transaction object created, try to start: %s\n", getTransactionSeed()));
+ rwTxn.startRWTransaction();
+ }
+
+ /** Start a batch transaction. */
+ public synchronized Status startBatchTxn(
+ StartBatchTransactionAction action, BatchClient batchClient, OutcomeSender sender) {
+ try {
+ if ((rwTxn != null) || (roTxn != null) || (batchTxn != null)) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Already in a transaction");
+ }
+
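+ // A batch transaction is either started fresh at the given read timestamp, or re-created from
+ // an existing (marshalled) BatchTransactionId carried in the action.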
+ if (action.hasBatchTxnTime()) {
+ TimestampBound timestampBound =
+ TimestampBound.ofReadTimestamp(Timestamp.fromProto(action.getBatchTxnTime()));
+ batchTxn = batchClient.batchReadOnlyTransaction(timestampBound);
+ } else if (action.hasTid()) {
+ BatchTransactionId tId = unmarshall(action.getTid());
+ batchTxn = batchClient.batchReadOnlyTransaction(tId);
+ } else {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Either timestamp or tid must be set");
+ }
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setBatchTxnId(marshall(batchTxn.getBatchTransactionId()))
+ .build();
+ initReadState();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Increase the read count when a read/query is issued. */
+ public synchronized void startRead() {
+ ++numPendingReads;
+ }
+
+ /**
+ * Decrease the read count when a read/query is finished, if status is aborted and there's no
+ * pending read/query, reset the transaction for retry.
+ */
+ public synchronized void finishRead(Status status) {
+ if (status.getCode() == Status.ABORTED.getCode()) {
+ readAborted = true;
+ }
+ --numPendingReads;
+ if (readAborted && numPendingReads <= 0) {
+ LOGGER.log(Level.FINE, "Transaction reset due to read/query abort");
+ readAborted = false;
+ }
+ }
+
+ /** Initialize the read count and aborted status when transaction started. */
+ public synchronized void initReadState() {
+ readAborted = false;
+ numPendingReads = 0;
+ }
+
+ /** Store the reference to the database client for future action use. */
+ public void setDatabaseClient(DatabaseClient client) {
+ dbClient = client;
+ }
+
+ /** Return a list of key column types of the given table. */
+ public List<com.google.spanner.v1.Type> getKeyColumnTypes(String tableName)
+ throws SpannerException {
+ Preconditions.checkNotNull(metadata);
+ return metadata.getKeyColumnTypes(tableName);
+ }
+
+ /** Return column type of the given table and column. */
+ public com.google.spanner.v1.Type getColumnType(String tableName, String columnName)
+ throws SpannerException {
+ Preconditions.checkNotNull(metadata);
+ return metadata.getColumnType(tableName, columnName);
+ }
+
+ /** Buffer a list of mutations in a read-write transaction. */
+ public synchronized void bufferMutations(List<Mutation> mutations) throws SpannerException {
+ getTransactionForWrite().buffer(mutations);
+ }
+
+ /** Execute a batch of updates in a read-write transaction. */
+ public synchronized long[] executeBatchDml(@NotNull List<Statement> stmts)
+ throws SpannerException {
+ for (int i = 0; i < stmts.size(); i++) {
+ LOGGER.log(
+ Level.INFO, String.format("executeBatchDml [%d]: %s", i + 1, stmts.get(i).toString()));
+ }
+ return getTransactionForWrite()
+ .batchUpdate(stmts, Options.tag("batch-update-transaction-tag"));
+ }
+
+ /** Finish active transaction in given finishMode, then send outcome back to client. */
+ public synchronized Status finish(Mode finishMode, OutcomeSender sender) {
+ if (numPendingReads > 0) {
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.FAILED_PRECONDITION, "Reads pending when trying to finish")));
+ }
+ SpannerActionOutcome.Builder outcomeBuilder = SpannerActionOutcome.newBuilder();
+ outcomeBuilder.setStatus(toProto(Status.OK));
+ if (roTxn != null || rwTxn != null) {
+ try {
+ if (roTxn != null) {
+ // read-only transaction
+ Timestamp ts = roTxn.getReadTimestamp();
+ outcomeBuilder.setCommitTime(ts.toProto());
+ roTxn.close();
+ clear();
+ } else {
+ // read-write transaction
+ if (!rwTxn.finish(finishMode)) {
+ LOGGER.log(Level.FINE, "Transaction restarted");
+ outcomeBuilder.setTransactionRestarted(true);
+ } else {
+ LOGGER.log(Level.FINE, "Transaction finished successfully");
+ if (rwTxn.getTimestamp() != null) {
+ outcomeBuilder.setCommitTime(rwTxn.getTimestamp());
+ }
+ clear();
+ }
+ }
+ } catch (SpannerException e) {
+ outcomeBuilder.setStatus(toProto(toStatus(e)));
+ clear();
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ return sender.sendOutcome(outcomeBuilder.build());
+ } else if (batchTxn != null) {
+ return toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Can't commit/abort a batch transaction"));
+ } else {
+ return toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "No currently active transaction"));
+ }
+ }
+
+ /** Close active batch transaction. */
+ public synchronized void closeBatchTxn() throws SpannerException {
+ if (batchTxn == null) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Not in a batch transaction");
+ }
+ batchTxn.close();
+ }
+ }
+
+ private Spanner client;
+ private Spanner clientWithTimeout;
+
+ private static final String TRANSACTION_ABANDONED = "Fake error to abandon transaction";
+
+ // Read-write transaction thread pool
+ private static final Executor txnThreadPool =
+ Executors.newCachedThreadPool(
+ new ThreadFactoryBuilder().setNameFormat("txn-pool-%d").build());
+
+ // Action thread pool
+ private static final Executor actionThreadPool =
+ Executors.newCachedThreadPool(
+ new ThreadFactoryBuilder().setNameFormat("action-pool-%d").build());
+
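+ // Return the Spanner client that uses the custom RPC timeout, creating and caching it on
+ // first use.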
+ private synchronized Spanner getClientWithTimeout(long timeoutSeconds) throws IOException {
+ if (clientWithTimeout != null) {
+ return clientWithTimeout;
+ }
+ clientWithTimeout = getClient(timeoutSeconds);
+ return clientWithTimeout;
+ }
+
+ private synchronized Spanner getClient() throws IOException {
+ if (client != null) {
+ return client;
+ }
+ client = getClient(/*timeoutSeconds=*/ 0);
+ return client;
+ }
+
+ // Return the Spanner client, creating one if it does not exist yet.
+ private synchronized Spanner getClient(long timeoutSeconds) throws IOException {
+ // Create a cloud spanner client
+ Credentials credentials;
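+ // Use anonymous credentials when no service key file is configured (e.g. against a local test
+ // server); otherwise load the service account key from disk.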
+ if (WorkerProxy.serviceKeyFile.isEmpty()) {
+ credentials = NoCredentials.getInstance();
+ } else {
+ credentials =
+ GoogleCredentials.fromStream(
+ new ByteArrayInputStream(
+ FileUtils.readFileToByteArray(new File(WorkerProxy.serviceKeyFile))),
+ HTTP_TRANSPORT_FACTORY);
+ }
+
+ TransportChannelProvider channelProvider =
+ CloudUtil.newChannelProviderHelper(WorkerProxy.spannerPort);
+
+ Duration rpcTimeout = Duration.ofHours(1L);
+ if (timeoutSeconds > 0) {
+ rpcTimeout = Duration.ofSeconds(timeoutSeconds);
+ }
+ RetrySettings retrySettings =
+ RetrySettings.newBuilder()
+ .setInitialRetryDelay(Duration.ofSeconds(1))
+ .setRetryDelayMultiplier(1.3)
+ .setMaxRetryDelay(Duration.ofSeconds(32))
+ .setInitialRpcTimeout(rpcTimeout)
+ .setRpcTimeoutMultiplier(1.0)
+ .setMaxRpcTimeout(rpcTimeout)
+ .setTotalTimeout(rpcTimeout)
+ .build();
+
+ // The Cloud Spanner client does not support global retry settings, so we need to add retry
+ // settings to each individual stub.
+ SpannerOptions.Builder optionsBuilder =
+ SpannerOptions.newBuilder()
+ .setProjectId(PROJECT_ID)
+ .setHost(HOST_PREFIX + WorkerProxy.spannerPort)
+ .setCredentials(credentials)
+ .setChannelProvider(channelProvider);
+
+ SpannerStubSettings.Builder stubSettingsBuilder =
+ optionsBuilder.getSpannerStubSettingsBuilder();
+
+ stubSettingsBuilder.executeSqlSettings().setRetrySettings(retrySettings);
+ stubSettingsBuilder.executeStreamingSqlSettings().setRetrySettings(retrySettings);
+ stubSettingsBuilder.readSettings().setRetrySettings(retrySettings);
+ stubSettingsBuilder.streamingReadSettings().setRetrySettings(retrySettings);
+ stubSettingsBuilder.commitSettings().setRetrySettings(retrySettings);
+ stubSettingsBuilder.executeBatchDmlSettings().setRetrySettings(retrySettings);
+ stubSettingsBuilder.partitionQuerySettings().setRetrySettings(retrySettings);
+ stubSettingsBuilder.partitionReadSettings().setRetrySettings(retrySettings);
+ stubSettingsBuilder.rollbackSettings().setRetrySettings(retrySettings);
+ stubSettingsBuilder.batchCreateSessionsSettings().setRetrySettings(retrySettings);
+ stubSettingsBuilder.beginTransactionSettings().setRetrySettings(retrySettings);
+ stubSettingsBuilder.createSessionSettings().setRetrySettings(retrySettings);
+ stubSettingsBuilder.getSessionSettings().setRetrySettings(retrySettings);
+ stubSettingsBuilder.deleteSessionSettings().setRetrySettings(retrySettings);
+
+ return optionsBuilder.build().getService();
+ }
+
+ /** Handle actions. */
+ public Status startHandlingRequest(
+ SpannerAsyncActionRequest req, ExecutionFlowContext executionContext) {
+ OutcomeSender outcomeSender = new OutcomeSender(req.getActionId(), executionContext);
+
+ if (!req.hasAction()) {
+ return outcomeSender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Invalid request")));
+ }
+ SpannerAction action = req.getAction();
+
+ // Update dbPath
+ String dbPath = executionContext.getDatabasePath(action.getDatabasePath());
+
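+ // Execute the action asynchronously on the shared action thread pool; a non-OK status
+ // terminates the whole response stream with an error.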
+ actionThreadPool.execute(
+ () -> {
+ Status status = executeAction(outcomeSender, action, dbPath, executionContext);
+ if (!status.isOk()) {
+ LOGGER.log(
+ Level.WARNING,
+ String.format("Failed to execute action with error: %s\n%s", status, action));
+ executionContext.onError(status.getCause());
+ }
+ });
+ return Status.OK;
+ }
+
+ /** Execute actions by action case, using OutcomeSender to send status and results back. */
+ private Status executeAction(
+ OutcomeSender outcomeSender,
+ SpannerAction action,
+ String dbPath,
+ ExecutionFlowContext executionContext) {
+
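+ // Dispatch on the action case; each handler reports its result through the OutcomeSender.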
+ try {
+ if (action.hasAdmin()) {
+ return executeAdminAction(action.getAdmin(), outcomeSender);
+ } else if (action.hasStart()) {
+ if (dbPath == null) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Database path must be set for this action");
+ }
+ DatabaseClient dbClient = getClient().getDatabaseClient(DatabaseId.of(dbPath));
+ return executeStartTxn(action.getStart(), dbClient, outcomeSender, executionContext);
+ } else if (action.hasFinish()) {
+ return executeFinishTxn(action.getFinish(), outcomeSender, executionContext);
+ } else if (action.hasMutation()) {
+ return executeMutation(
+ action.getMutation(), outcomeSender, executionContext, /*isWrite=*/ false);
+ } else if (action.hasRead()) {
+ return executeRead(action.getRead(), outcomeSender, executionContext);
+ } else if (action.hasQuery()) {
+ return executeQuery(action.getQuery(), outcomeSender, executionContext);
+ } else if (action.hasDml()) {
+ return executeCloudDmlUpdate(action.getDml(), outcomeSender, executionContext);
+ } else if (action.hasBatchDml()) {
+ return executeCloudBatchDmlUpdates(action.getBatchDml(), outcomeSender, executionContext);
+ } else if (action.hasWrite()) {
+ return executeMutation(
+ action.getWrite().getMutation(), outcomeSender, executionContext, /*isWrite=*/ true);
+ } else if (action.hasStartBatchTxn()) {
+ if (dbPath == null) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "database path must be set for this action");
+ }
+ BatchClient batchClient = getClient().getBatchClient(DatabaseId.of(dbPath));
+ return executeStartBatchTxn(
+ action.getStartBatchTxn(), batchClient, outcomeSender, executionContext);
+ } else if (action.hasGenerateDbPartitionsRead()) {
+ return executeGenerateDbPartitionsRead(
+ action.getGenerateDbPartitionsRead(), outcomeSender, executionContext);
+ } else if (action.hasGenerateDbPartitionsQuery()) {
+ return executeGenerateDbPartitionsQuery(
+ action.getGenerateDbPartitionsQuery(), outcomeSender, executionContext);
+ } else if (action.hasExecutePartition()) {
+ return executeExecutePartition(
+ action.getExecutePartition(), outcomeSender, executionContext);
+ } else if (action.hasCloseBatchTxn()) {
+ return executeCloseBatchTxn(action.getCloseBatchTxn(), outcomeSender, executionContext);
+ } else if (action.hasExecuteChangeStreamQuery()) {
+ if (dbPath == null) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Database path must be set for this action");
+ }
+ return executeExecuteChangeStreamQuery(
+ dbPath, action.getExecuteChangeStreamQuery(), outcomeSender);
+ } else {
+ return outcomeSender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.UNIMPLEMENTED, "Not implemented yet: \n" + action)));
+ }
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return outcomeSender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute admin actions by action case, using OutcomeSender to send status and results back. */
+ private Status executeAdminAction(AdminAction action, OutcomeSender outcomeSender) {
+ try {
+ if (action.hasCreateCloudInstance()) {
+ return executeCreateCloudInstance(action.getCreateCloudInstance(), outcomeSender);
+ } else if (action.hasUpdateCloudInstance()) {
+ return executeUpdateCloudInstance(action.getUpdateCloudInstance(), outcomeSender);
+ } else if (action.hasDeleteCloudInstance()) {
+ return executeDeleteCloudInstance(action.getDeleteCloudInstance(), outcomeSender);
+ } else if (action.hasListCloudInstances()) {
+ return executeListCloudInstances(action.getListCloudInstances(), outcomeSender);
+ } else if (action.hasListInstanceConfigs()) {
+ return executeListCloudInstanceConfigs(action.getListInstanceConfigs(), outcomeSender);
+ } else if (action.hasGetCloudInstanceConfig()) {
+ return executeGetCloudInstanceConfig(action.getGetCloudInstanceConfig(), outcomeSender);
+ } else if (action.hasGetCloudInstance()) {
+ return executeGetCloudInstance(action.getGetCloudInstance(), outcomeSender);
+ } else if (action.hasCreateUserInstanceConfig()) {
+ return executeCreateUserInstanceConfig(action.getCreateUserInstanceConfig(), outcomeSender);
+ } else if (action.hasDeleteUserInstanceConfig()) {
+ return executeDeleteUserInstanceConfig(action.getDeleteUserInstanceConfig(), outcomeSender);
+ } else if (action.hasCreateCloudDatabase()) {
+ return executeCreateCloudDatabase(action.getCreateCloudDatabase(), outcomeSender);
+ } else if (action.hasUpdateCloudDatabaseDdl()) {
+ return executeUpdateCloudDatabaseDdl(action.getUpdateCloudDatabaseDdl(), outcomeSender);
+ } else if (action.hasDropCloudDatabase()) {
+ return executeDropCloudDatabase(action.getDropCloudDatabase(), outcomeSender);
+ } else if (action.hasCreateCloudBackup()) {
+ return executeCreateCloudBackup(action.getCreateCloudBackup(), outcomeSender);
+ } else if (action.hasCopyCloudBackup()) {
+ return executeCopyCloudBackup(action.getCopyCloudBackup(), outcomeSender);
+ } else if (action.hasGetCloudBackup()) {
+ return executeGetCloudBackup(action.getGetCloudBackup(), outcomeSender);
+ } else if (action.hasUpdateCloudBackup()) {
+ return executeUpdateCloudBackup(action.getUpdateCloudBackup(), outcomeSender);
+ } else if (action.hasDeleteCloudBackup()) {
+ return executeDeleteCloudBackup(action.getDeleteCloudBackup(), outcomeSender);
+ } else if (action.hasListCloudBackups()) {
+ return executeListCloudBackups(action.getListCloudBackups(), outcomeSender);
+ } else if (action.hasListCloudBackupOperations()) {
+ return executeListCloudBackupOperations(
+ action.getListCloudBackupOperations(), outcomeSender);
+ } else if (action.hasListCloudDatabases()) {
+ return executeListCloudDatabases(action.getListCloudDatabases(), outcomeSender);
+ } else if (action.hasListCloudDatabaseOperations()) {
+ return executeListCloudDatabaseOperations(
+ action.getListCloudDatabaseOperations(), outcomeSender);
+ } else if (action.hasRestoreCloudDatabase()) {
+ return executeRestoreCloudDatabase(action.getRestoreCloudDatabase(), outcomeSender);
+ } else if (action.hasGetCloudDatabase()) {
+ return executeGetCloudDatabase(action.getGetCloudDatabase(), outcomeSender);
+ } else if (action.hasGetOperation()) {
+ return executeGetOperation(action.getGetOperation(), outcomeSender);
+ } else if (action.hasCancelOperation()) {
+ return executeCancelOperation(action.getCancelOperation(), outcomeSender);
+ } else {
+ return outcomeSender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.UNIMPLEMENTED, "Not implemented yet: \n" + action)));
+ }
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return outcomeSender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that creates a cloud instance. */
+ private Status executeCreateCloudInstance(
+ CreateCloudInstanceAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Creating instance: \n%s", action));
+ InstanceAdminClient instanceAdminClient = getClient().getInstanceAdminClient();
+ final String instanceId = action.getInstanceId();
+ InstanceId instance = InstanceId.of(action.getProjectId(), instanceId);
+ InstanceInfo.Builder builder =
+ InstanceInfo.newBuilder(instance)
+ .setInstanceConfigId(
+ InstanceConfigId.of(action.getProjectId(), action.getInstanceConfigId()))
+ .setDisplayName(instanceId)
+ .putAllLabels(action.getLabelsMap());
+ if (action.hasNodeCount()) {
+ builder.setNodeCount(action.getNodeCount());
+ }
+ if (action.hasProcessingUnits()) {
+ builder.setProcessingUnits(action.getProcessingUnits());
+ }
+ final InstanceInfo request = builder.build();
+ instanceAdminClient.createInstance(request).get();
+ } catch (ExecutionException | InterruptedException ex) {
+ SpannerException e = SpannerExceptionFactory.newSpannerException(ex);
+ if (e.getErrorCode() == ErrorCode.ALREADY_EXISTS) {
+ // Another worker or our previous attempt already created the instance.
+ return sender.finishWithOK();
+ }
+ return sender.finishWithError(toStatus(e));
+ } catch (SpannerException se) {
+ return sender.finishWithError(toStatus(se));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ return sender.finishWithOK();
+ }
+
+ /** Execute action that updates a cloud instance. */
+ private Status executeUpdateCloudInstance(
+ UpdateCloudInstanceAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Updating instance: \n%s", action));
+ InstanceAdminClient instanceAdminClient = getClient().getInstanceAdminClient();
+ final String instanceId = action.getInstanceId();
+ final InstanceId instance = InstanceId.of(action.getProjectId(), instanceId);
+ final InstanceInfo.Builder builder = InstanceInfo.newBuilder(instance);
+
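+ // Only the fields explicitly set on the action are included in the update field mask.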
+ ArrayList<InstanceInfo.InstanceField> fieldsToUpdate = new ArrayList<>();
+ if (action.hasDisplayName()) {
+ fieldsToUpdate.add(InstanceInfo.InstanceField.DISPLAY_NAME);
+ builder.setDisplayName(instanceId);
+ }
+ if (action.hasNodeCount()) {
+ fieldsToUpdate.add(InstanceInfo.InstanceField.NODE_COUNT);
+ builder.setNodeCount(action.getNodeCount());
+ }
+ if (action.hasProcessingUnits()) {
+ fieldsToUpdate.add(InstanceInfo.InstanceField.PROCESSING_UNITS);
+ builder.setProcessingUnits(action.getProcessingUnits());
+ }
+ Map<String, String> labels = action.getLabelsMap();
+ if (!labels.isEmpty()) {
+ fieldsToUpdate.add(InstanceInfo.InstanceField.LABELS);
+ builder.putAllLabels(action.getLabelsMap());
+ }
+ final InstanceInfo request = builder.build();
+ instanceAdminClient
+ .updateInstance(request, fieldsToUpdate.toArray(new InstanceInfo.InstanceField[0]))
+ .get();
+ } catch (ExecutionException | InterruptedException ex) {
+ SpannerException e = SpannerExceptionFactory.newSpannerException(ex);
+ return sender.finishWithError(toStatus(e));
+ } catch (SpannerException se) {
+ return sender.finishWithError(toStatus(se));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ return sender.finishWithOK();
+ }
+
+ /** Execute action that deletes a cloud instance. */
+ private Status executeDeleteCloudInstance(
+ DeleteCloudInstanceAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Deleting instance: \n%s", action));
+ InstanceAdminClient instanceAdminClient = getClient().getInstanceAdminClient();
+ final String instanceId = action.getInstanceId();
+ final InstanceId instance = InstanceId.of(action.getProjectId(), instanceId);
+ instanceAdminClient.deleteInstance(instance.getInstance());
+ } catch (SpannerException se) {
+ return sender.finishWithError(toStatus(se));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ return sender.finishWithOK();
+ }
+
+ /** Execute action that lists cloud instances. */
+ private Status executeListCloudInstances(ListCloudInstancesAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Listing instances:\n%s", action));
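+ // Translate the optional page size, filter and page token from the action into ListOptions
+ // for the instance admin client.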
+ ArrayList<Options.ListOption> options = new ArrayList<>();
+ if (action.hasPageSize()) {
+ options.add(Options.pageSize(action.getPageSize()));
+ }
+ if (action.hasFilter()) {
+ options.add(Options.filter(action.getFilter()));
+ }
+ if (action.hasPageToken()) {
+ options.add(Options.pageToken(action.getPageToken()));
+ }
+
+ Page<Instance> response =
+ getClient()
+ .getInstanceAdminClient()
+ .listInstances(options.toArray(new Options.ListOption[0]));
+ List<com.google.spanner.admin.instance.v1.Instance> instanceList = new ArrayList<>();
+ for (Instance instance : response.iterateAll()) {
+ instanceList.add(instanceToProto(instance));
+ }
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setAdminResult(
+ AdminResult.newBuilder()
+ .setInstanceResponse(
+ CloudInstanceResponse.newBuilder()
+ .addAllListedInstances(instanceList)
+ .setNextPageToken("")
+ .build()))
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that lists cloud instance configs. */
+ private Status executeListCloudInstanceConfigs(
+ ListCloudInstanceConfigsAction action, OutcomeSender sender) {
+ LOGGER.log(Level.INFO, String.format("Listing instance configs:\n%s", action));
+ ArrayList<Options.ListOption> options = new ArrayList<>();
+ if (action.hasPageSize()) {
+ options.add(Options.pageSize(action.getPageSize()));
+ }
+ if (action.hasPageToken()) {
+ options.add(Options.pageToken(action.getPageToken()));
+ }
+ try {
+ Page<InstanceConfig> response =
+ getClient()
+ .getInstanceAdminClient()
+ .listInstanceConfigs(options.toArray(new Options.ListOption[0]));
+ List<com.google.spanner.admin.instance.v1.InstanceConfig> instanceConfigList =
+ new ArrayList<>();
+ for (InstanceConfig instanceConfig : response.iterateAll()) {
+ instanceConfigList.add(instanceConfigToProto(instanceConfig));
+ }
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setAdminResult(
+ AdminResult.newBuilder()
+ .setInstanceConfigResponse(
+ CloudInstanceConfigResponse.newBuilder()
+ .addAllListedInstanceConfigs(instanceConfigList)
+ .setNextPageToken("")
+ .build()))
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that gets a cloud instance config. */
+ private Status executeGetCloudInstanceConfig(
+ GetCloudInstanceConfigAction action, OutcomeSender sender) {
+ LOGGER.log(Level.INFO, String.format("Getting instance config:\n%s", action));
+ try {
+ InstanceConfig instanceConfig =
+ getClient().getInstanceAdminClient().getInstanceConfig(action.getInstanceConfigId());
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setAdminResult(
+ AdminResult.newBuilder()
+ .setInstanceConfigResponse(
+ CloudInstanceConfigResponse.newBuilder()
+ .setInstanceConfig(instanceConfigToProto(instanceConfig))
+ .build()))
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that retrieves a cloud instance. */
+ private Status executeGetCloudInstance(GetCloudInstanceAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Retrieving instance:\n%s", action));
+ Instance instance = getClient().getInstanceAdminClient().getInstance(action.getInstanceId());
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setAdminResult(
+ AdminResult.newBuilder()
+ .setInstanceResponse(
+ CloudInstanceResponse.newBuilder()
+ .setInstance(instanceToProto(instance))
+ .build()))
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that creates a user instance config. */
+ private Status executeCreateUserInstanceConfig(
+ CreateUserInstanceConfigAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Creating user instance config:\n%s", action));
+ final InstanceConfig baseConfig =
+ getClient().getInstanceAdminClient().getInstanceConfig(action.getBaseConfigId());
+ InstanceConfigInfo instanceConfigInfo =
+ InstanceConfig.newBuilder(
+ InstanceConfigId.of(action.getProjectId(), action.getUserConfigId()), baseConfig)
+ .setDisplayName(action.getUserConfigId())
+ .addReadOnlyReplicas(baseConfig.getOptionalReplicas())
+ .build();
+ getClient().getInstanceAdminClient().createInstanceConfig(instanceConfigInfo).get();
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ return sender.finishWithOK();
+ }
+
+ /** Execute action that deletes a user instance config. */
+ private Status executeDeleteUserInstanceConfig(
+ DeleteUserInstanceConfigAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Deleting user instance config:\n%s", action));
+ getClient().getInstanceAdminClient().deleteInstanceConfig(action.getUserConfigId());
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ return sender.finishWithOK();
+ }
+
+ /** Execute action that creates a cloud custom encrypted database. */
+ private Status executeCreateCloudCustomEncryptedDatabase(
+ CreateCloudDatabaseAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Creating database: \n%s", action));
+ Database dbInfo =
+ getClient()
+ .getDatabaseAdminClient()
+ .newDatabaseBuilder(
+ DatabaseId.of(
+ action.getProjectId(), action.getInstanceId(), action.getDatabaseId()))
+ .setEncryptionConfig(
+ CustomerManagedEncryption.fromProtoOrNull(action.getEncryptionConfig()))
+ .build();
+ getClient().getDatabaseAdminClient().createDatabase(dbInfo, action.getSdlStatementList());
+ } catch (SpannerException se) {
+ return sender.finishWithError(toStatus(se));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ return sender.finishWithOK();
+ }
+
+ /** Execute action that creates a cloud database. */
+ private Status executeCreateCloudDatabase(
+ CreateCloudDatabaseAction action, OutcomeSender sender) {
+ if (action.hasEncryptionConfig()) {
+ return executeCreateCloudCustomEncryptedDatabase(action, sender);
+ }
+ try {
+ LOGGER.log(Level.INFO, String.format("Creating database: \n%s", action));
+ final String instanceId = action.getInstanceId();
+ final String databaseId = action.getDatabaseId();
+ getClient()
+ .getDatabaseAdminClient()
+ .createDatabase(instanceId, databaseId, action.getSdlStatementList())
+ .get();
+ } catch (ExecutionException | InterruptedException ex) {
+ SpannerException e = SpannerExceptionFactory.newSpannerException(ex);
+ if (e.getErrorCode() == ErrorCode.ALREADY_EXISTS) {
+ // The client does not retry the create-database call, but we assume that no other process has
+ // created the database with a different schema (another instance of a worker may have created
+ // it with the same schema).
+ return sender.finishWithOK();
+ }
+ return sender.finishWithError(toStatus(e));
+ } catch (SpannerException se) {
+ return sender.finishWithError(toStatus(se));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ return sender.finishWithOK();
+ }
+
+ /** Execute action that updates a cloud database. */
+ private Status executeUpdateCloudDatabaseDdl(
+ UpdateCloudDatabaseDdlAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Updating database: \n%s", action));
+ DatabaseAdminClient dbAdminClient = getClient().getDatabaseAdminClient();
+ final String instanceId = action.getInstanceId();
+ final String databaseId = action.getDatabaseId();
+ UpdateDatabaseDdlMetadata metadata;
+ OperationFuture<Void, UpdateDatabaseDdlMetadata> updateOp =
+ dbAdminClient.updateDatabaseDdl(
+ instanceId, databaseId, action.getSdlStatementList(), action.getOperationId());
+ updateOp.get();
+ metadata = updateOp.getMetadata().get();
+ int tsCount = metadata.getCommitTimestampsCount();
+ // Fetch the last timestamp
+ sender.setTimestamp(metadata.getCommitTimestamps(tsCount - 1));
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(
+ Level.WARNING,
+ "Unexpected error executing DDL: "
+ + String.join("; ", action.getSdlStatementList())
+ + " "
+ + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ return sender.finishWithOK();
+ }
+
+ /** Execute action that drops a cloud database. */
+ private Status executeDropCloudDatabase(DropCloudDatabaseAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Dropping database: \n%s", action));
+ DatabaseAdminClient dbAdminClient = getClient().getDatabaseAdminClient();
+ final String instanceId = action.getInstanceId();
+ final String databaseId = action.getDatabaseId();
+ dbAdminClient.dropDatabase(instanceId, databaseId);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ return sender.finishWithOK();
+ }
+
+ /** Execute action that creates a cloud database backup. */
+ private Status executeCreateCloudBackup(CreateCloudBackupAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Creating backup: \n%s", action));
+ Backup backupResult =
+ getClient()
+ .getDatabaseAdminClient()
+ .createBackup(
+ action.getInstanceId(),
+ action.getBackupId(),
+ action.getDatabaseId(),
+ Timestamp.fromProto(action.getExpireTime()))
+ .get();
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setAdminResult(
+ AdminResult.newBuilder()
+ .setBackupResponse(
+ CloudBackupResponse.newBuilder()
+ .setBackup(backupResult.getProto())
+ .build()))
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that copies a cloud database backup. */
+ private Status executeCopyCloudBackup(CopyCloudBackupAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Copying backup: \n%s", action));
+ Backup backupResult =
+ getClient()
+ .getDatabaseAdminClient()
+ .copyBackup(
+ action.getInstanceId(),
+ action.getBackupId(),
+ action.getSourceBackup(),
+ Timestamp.fromProto(action.getExpireTime()))
+ .get();
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setAdminResult(
+ AdminResult.newBuilder()
+ .setBackupResponse(
+ CloudBackupResponse.newBuilder()
+ .setBackup(backupResult.getProto())
+ .build()))
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that gets a cloud database backup. */
+ private Status executeGetCloudBackup(GetCloudBackupAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Getting backup: \n%s", action));
+ Backup backupResult =
+ getClient()
+ .getDatabaseAdminClient()
+ .getBackup(action.getInstanceId(), action.getBackupId());
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setAdminResult(
+ AdminResult.newBuilder()
+ .setBackupResponse(
+ CloudBackupResponse.newBuilder()
+ .setBackup(backupResult.getProto())
+ .build()))
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that updates a cloud database backup. */
+ private Status executeUpdateCloudBackup(UpdateCloudBackupAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Updating backup: \n%s", action));
+ Backup backupResult =
+ getClient()
+ .getDatabaseAdminClient()
+ .updateBackup(
+ action.getInstanceId(),
+ action.getBackupId(),
+ Timestamp.fromProto(action.getExpireTime()));
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setAdminResult(
+ AdminResult.newBuilder()
+ .setBackupResponse(
+ CloudBackupResponse.newBuilder()
+ .setBackup(backupResult.getProto())
+ .build()))
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that deletes a cloud database backup. */
+ private Status executeDeleteCloudBackup(DeleteCloudBackupAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, "Deleting backup: \n%s", action);
+ getClient()
+ .getDatabaseAdminClient()
+ .deleteBackup(action.getInstanceId(), action.getBackupId());
+ return sender.finishWithOK();
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that lists cloud database backups. */
+ private Status executeListCloudBackups(ListCloudBackupsAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Listing backup: \n%s", action));
+ Page<Backup> response =
+ getClient()
+ .getDatabaseAdminClient()
+ .listBackups(
+ action.getInstanceId(),
+ Options.pageSize(action.getPageSize()),
+ Options.filter(action.getFilter()),
+ Options.pageToken(action.getPageToken()));
+ List<com.google.spanner.admin.database.v1.Backup> backupList = new ArrayList<>();
+ for (Backup backup : response.iterateAll()) {
+ backupList.add(backup.getProto());
+ }
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setAdminResult(
+ AdminResult.newBuilder()
+ .setBackupResponse(
+ CloudBackupResponse.newBuilder()
+ .addAllListedBackups(backupList)
+ .setNextPageToken("")
+ .build()))
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that lists cloud database backup operations. */
+ private Status executeListCloudBackupOperations(
+ ListCloudBackupOperationsAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Listing backup operation: \n%s", action));
+
+ Page<Operation> response =
+ getClient()
+ .getDatabaseAdminClient()
+ .listBackupOperations(
+ action.getInstanceId(),
+ Options.pageSize(action.getPageSize()),
+ Options.filter(action.getFilter()),
+ Options.pageToken(action.getPageToken()));
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setAdminResult(
+ AdminResult.newBuilder()
+ .setBackupResponse(
+ CloudBackupResponse.newBuilder()
+ .addAllListedBackupOperations(response.iterateAll())
+ .setNextPageToken("")
+ .build()))
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that lists cloud databases. */
+ private Status executeListCloudDatabases(ListCloudDatabasesAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Listing database: \n%s", action));
+ Page<Database> response =
+ getClient()
+ .getDatabaseAdminClient()
+ .listDatabases(
+ action.getInstanceId(),
+ Options.pageSize(action.getPageSize()),
+ Options.pageToken(action.getPageToken()));
+ List<com.google.spanner.admin.database.v1.Database> databaseList = new ArrayList<>();
+ for (Database database : response.iterateAll()) {
+ databaseList.add(database.getProto());
+ }
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setAdminResult(
+ AdminResult.newBuilder()
+ .setDatabaseResponse(
+ CloudDatabaseResponse.newBuilder()
+ .addAllListedDatabases(databaseList)
+ .setNextPageToken("")
+ .build()))
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that lists cloud database operations. */
+ private Status executeListCloudDatabaseOperations(
+ ListCloudDatabaseOperationsAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Listing database operation: \n%s", action));
+
+ Page<Operation> response =
+ getClient()
+ .getDatabaseAdminClient()
+ .listDatabaseOperations(
+ action.getInstanceId(),
+ Options.pageSize(action.getPageSize()),
+ Options.filter(action.getFilter()),
+ Options.pageToken(action.getPageToken()));
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setAdminResult(
+ AdminResult.newBuilder()
+ .setDatabaseResponse(
+ CloudDatabaseResponse.newBuilder()
+ .addAllListedDatabaseOperations(response.iterateAll())
+ .setNextPageToken("")
+ .build()))
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that restores a cloud database. */
+ private Status executeRestoreCloudDatabase(
+ RestoreCloudDatabaseAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Restoring database: \n%s", action));
+ Database db =
+ getClient()
+ .getDatabaseAdminClient()
+ .restoreDatabase(
+ action.getBackupInstanceId(),
+ action.getBackupId(),
+ action.getDatabaseInstanceId(),
+ action.getDatabaseId())
+ .get();
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setAdminResult(
+ AdminResult.newBuilder()
+ .setDatabaseResponse(
+ CloudDatabaseResponse.newBuilder().setDatabase(db.getProto()).build()))
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that gets a cloud database. */
+ private Status executeGetCloudDatabase(GetCloudDatabaseAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Getting database: \n%s", action));
+ Database databaseResult =
+ getClient()
+ .getDatabaseAdminClient()
+ .getDatabase(action.getInstanceId(), action.getDatabaseId());
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setAdminResult(
+ AdminResult.newBuilder()
+ .setDatabaseResponse(
+ CloudDatabaseResponse.newBuilder()
+ .setDatabase(databaseResult.getProto())
+ .build()))
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that gets an operation. */
+ private Status executeGetOperation(GetOperationAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Getting operation: \n%s", action));
+ final String operationName = action.getOperation();
+ Operation operationResult = getClient().getDatabaseAdminClient().getOperation(operationName);
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .setAdminResult(
+ AdminResult.newBuilder()
+ .setOperationResponse(
+ OperationResponse.newBuilder().setOperation(operationResult).build()))
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that cancels an operation. */
+ private Status executeCancelOperation(CancelOperationAction action, OutcomeSender sender) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Cancelling operation: \n%s", action));
+ final String operationName = action.getOperation();
+ getClient().getDatabaseAdminClient().cancelOperation(operationName);
+ return sender.finishWithOK();
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that starts a batch transaction. */
+ private Status executeStartBatchTxn(
+ StartBatchTransactionAction action,
+ BatchClient batchClient,
+ OutcomeSender sender,
+ ExecutionFlowContext executionContext) {
+ LOGGER.log(Level.INFO, "Starting batch transaction");
+ return executionContext.startBatchTxn(action, batchClient, sender);
+ }
+
+ /** Execute action that finishes a batch transaction. */
+ private Status executeCloseBatchTxn(
+ CloseBatchTransactionAction action,
+ OutcomeSender sender,
+ ExecutionFlowContext executionContext) {
+ try {
+ LOGGER.log(Level.INFO, "Closing batch transaction");
+ if (action.getCleanup()) {
+ executionContext.closeBatchTxn();
+ }
+ return sender.finishWithOK();
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ }
+ }
+
+ /** Execute action that generates database partitions for the given read. */
+ private Status executeGenerateDbPartitionsRead(
+ GenerateDbPartitionsForReadAction action,
+ OutcomeSender sender,
+ ExecutionFlowContext executionContext) {
+ try {
+ BatchReadOnlyTransaction batchTxn = executionContext.getBatchTxn();
+ Metadata metadata = new Metadata(action.getTableList());
+ executionContext.setMetadata(metadata);
+ ReadAction request = action.getRead();
+
+ List<com.google.spanner.v1.Type> typeList = new ArrayList<>();
+ for (int i = 0; i < request.getColumnCount(); ++i) {
+ typeList.add(executionContext.getColumnType(request.getTable(), request.getColumn(i)));
+ }
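+ // The column types collected above are needed to decode the key set proto into typed cloud
+ // keys below.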
+ KeySet keySet = keySetProtoToCloudKeySet(request.getKeys(), typeList);
+ PartitionOptions.Builder partitionOptionsBuilder = PartitionOptions.newBuilder();
+ if (action.hasDesiredBytesPerPartition() && action.getDesiredBytesPerPartition() > 0) {
+ partitionOptionsBuilder.setPartitionSizeBytes(action.getDesiredBytesPerPartition());
+ }
+ if (action.hasMaxPartitionCount()) {
+ partitionOptionsBuilder.setMaxPartitions(action.getMaxPartitionCount());
+ }
+ List<Partition> parts;
+ if (request.hasIndex()) {
+ parts =
+ batchTxn.partitionReadUsingIndex(
+ partitionOptionsBuilder.build(),
+ request.getTable(),
+ request.getIndex(),
+ keySet,
+ new ArrayList<>(request.getColumnList()));
+ } else {
+ parts =
+ batchTxn.partitionRead(
+ partitionOptionsBuilder.build(),
+ request.getTable(),
+ keySet,
+ new ArrayList<>(request.getColumnList()));
+ }
+ List<BatchPartition> batchPartitions = new ArrayList<>();
+ for (Partition part : parts) {
+ batchPartitions.add(
+ BatchPartition.newBuilder()
+ .setPartition(marshall(part))
+ .setPartitionToken(part.getPartitionToken())
+ .setTable(request.getTable())
+ .setIndex(request.getIndex())
+ .build());
+ }
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .addAllDbPartition(batchPartitions)
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ LOGGER.log(Level.WARNING, String.format("GenerateDbPartitionsRead failed for %s", action));
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute action that generates database partitions for the given query. */
+ private Status executeGenerateDbPartitionsQuery(
+ GenerateDbPartitionsForQueryAction action,
+ OutcomeSender sender,
+ ExecutionFlowContext executionContext) {
+ try {
+ BatchReadOnlyTransaction batchTxn = executionContext.getBatchTxn();
+ Statement.Builder stmt = Statement.newBuilder(action.getQuery().getSql());
+ for (int i = 0; i < action.getQuery().getParamsCount(); ++i) {
+ stmt.bind(action.getQuery().getParams(i).getName())
+ .to(
+ valueProtoToCloudValue(
+ action.getQuery().getParams(i).getType(),
+ action.getQuery().getParams(i).getValue()));
+ }
+ PartitionOptions partitionOptions =
+ PartitionOptions.newBuilder()
+ .setPartitionSizeBytes(action.getDesiredBytesPerPartition())
+ .build();
+ List<Partition> parts = batchTxn.partitionQuery(partitionOptions, stmt.build());
+ List<BatchPartition> batchPartitions = new ArrayList<>();
+ for (Partition part : parts) {
+ batchPartitions.add(
+ BatchPartition.newBuilder()
+ .setPartition(marshall(part))
+ .setPartitionToken(part.getPartitionToken())
+ .build());
+ }
+
+ SpannerActionOutcome outcome =
+ SpannerActionOutcome.newBuilder()
+ .setStatus(toProto(Status.OK))
+ .addAllDbPartition(batchPartitions)
+ .build();
+ return sender.sendOutcome(outcome);
+ } catch (SpannerException e) {
+ LOGGER.log(Level.WARNING, String.format("GenerateDbPartitionsQuery failed for %s", action));
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Execute a read or query for the given partitions. */
+ private Status executeExecutePartition(
+ ExecutePartitionAction action, OutcomeSender sender, ExecutionFlowContext executionContext) {
+ try {
+ BatchReadOnlyTransaction batchTxn = executionContext.getBatchTxn();
+ ByteString partitionBinary = action.getPartition().getPartition();
+ if (partitionBinary.size() == 0) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Invalid batchPartition " + action);
+ }
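+ // A partition that carries a table name was generated by a partitioned read; otherwise it
+ // came from a partitioned query, so the sender is initialized accordingly.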
+ if (action.getPartition().hasTable()) {
+ sender.initForBatchRead(action.getPartition().getTable(), action.getPartition().getIndex());
+ } else {
+ sender.initForQuery();
+ }
+ Partition partition = unmarshall(partitionBinary);
+ executionContext.startRead();
+ ResultSet result = batchTxn.execute(partition);
+ return processResults(result, 0, sender, executionContext);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /** Build a child partition record proto out of childPartitionRecord returned by client. */
+ private ChildPartitionsRecord buildChildPartitionRecord(Struct childPartitionRecord)
+ throws Exception {
+ ChildPartitionsRecord.Builder childPartitionRecordBuilder = ChildPartitionsRecord.newBuilder();
+ childPartitionRecordBuilder.setStartTime(
+ Timestamps.parse(childPartitionRecord.getTimestamp(0).toString()));
+ childPartitionRecordBuilder.setRecordSequence(childPartitionRecord.getString(1));
+ for (Struct childPartition : childPartitionRecord.getStructList(2)) {
+ ChildPartitionsRecord.ChildPartition.Builder childPartitionBuilder =
+ ChildPartitionsRecord.ChildPartition.newBuilder();
+ childPartitionBuilder.setToken(childPartition.getString(0));
+ childPartitionBuilder.addAllParentPartitionTokens(childPartition.getStringList(1));
+ childPartitionRecordBuilder.addChildPartitions(childPartitionBuilder.build());
+ }
+ return childPartitionRecordBuilder.build();
+ }
+
+ /** Build a data change record proto out of dataChangeRecord returned by client. */
+ private DataChangeRecord buildDataChangeRecord(Struct dataChangeRecord) throws Exception {
+ DataChangeRecord.Builder dataChangeRecordBuilder = DataChangeRecord.newBuilder();
+ dataChangeRecordBuilder.setCommitTime(
+ Timestamps.parse(dataChangeRecord.getTimestamp(0).toString()));
+ dataChangeRecordBuilder.setRecordSequence(dataChangeRecord.getString(1));
+ dataChangeRecordBuilder.setTransactionId(dataChangeRecord.getString(2));
+ dataChangeRecordBuilder.setIsLastRecord(dataChangeRecord.getBoolean(3));
+ dataChangeRecordBuilder.setTable(dataChangeRecord.getString(4));
+ for (Struct columnType : dataChangeRecord.getStructList(5)) {
+ DataChangeRecord.ColumnType.Builder columnTypeBuilder =
+ DataChangeRecord.ColumnType.newBuilder();
+ columnTypeBuilder.setName(columnType.getString(0));
+ columnTypeBuilder.setType(getJsonStringForStructColumn(columnType, 1));
+ columnTypeBuilder.setIsPrimaryKey(columnType.getBoolean(2));
+ columnTypeBuilder.setOrdinalPosition(columnType.getLong(3));
+ dataChangeRecordBuilder.addColumnTypes(columnTypeBuilder.build());
+ }
+ for (Struct mod : dataChangeRecord.getStructList(6)) {
+ DataChangeRecord.Mod.Builder modBuilder = DataChangeRecord.Mod.newBuilder();
+ modBuilder.setKeys(getJsonStringForStructColumn(mod, 0));
+ modBuilder.setNewValues(getJsonStringForStructColumn(mod, 1));
+ modBuilder.setOldValues(getJsonStringForStructColumn(mod, 2));
+ dataChangeRecordBuilder.addMods(modBuilder.build());
+ }
+ dataChangeRecordBuilder.setModType(dataChangeRecord.getString(7));
+ dataChangeRecordBuilder.setValueCaptureType(dataChangeRecord.getString(8));
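+ // The remaining columns are addressed by proto field number minus one, which assumes the TVF
+ // output column order mirrors the DataChangeRecord proto field order.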
+
+ // Get transaction tag.
+ dataChangeRecordBuilder.setTransactionTag(
+ dataChangeRecord.getString(DataChangeRecord.TRANSACTION_TAG_FIELD_NUMBER - 1));
+
+ // Get is system transaction.
+ dataChangeRecordBuilder.setIsSystemTransaction(
+ dataChangeRecord.getBoolean(DataChangeRecord.IS_SYSTEM_TRANSACTION_FIELD_NUMBER - 1));
+ return dataChangeRecordBuilder.build();
+ }
+
+ /** Returns the json or string value of a struct column with index=columnIndex. */
+ private String getJsonStringForStructColumn(Struct struct, int columnIndex) {
+ Type columnType = struct.getColumnType(columnIndex);
+ switch (columnType.getCode()) {
+ case JSON:
+ return struct.getJson(columnIndex);
+ case STRING:
+ return struct.getString(columnIndex);
+ default:
+ throw new IllegalArgumentException(
+ String.format(
+ "Cannot extract value from column with index = %d and column type = %s for struct:"
+ + " %s",
+ columnIndex, columnType, struct));
+ }
+ }
+
+ /** Build a heartbeat record proto out of heartbeatRecord returned by client. */
+ private HeartbeatRecord buildHeartbeatRecord(Struct heartbeatRecord) throws Exception {
+ HeartbeatRecord.Builder heartbeatRecordBuilder = HeartbeatRecord.newBuilder();
+ heartbeatRecordBuilder.setHeartbeatTime(
+ Timestamps.parse(heartbeatRecord.getTimestamp(0).toString()));
+ return heartbeatRecordBuilder.build();
+ }
+
+ /** Execute action that executes a change stream query. */
+ private Status executeExecuteChangeStreamQuery(
+ String dbPath, ExecuteChangeStreamQuery action, OutcomeSender sender) {
+ try {
+ LOGGER.log(
+ Level.INFO, String.format("Start executing change stream query: \n%s", action));
+
+ // Retrieve TVF parameters from the action.
+ String changeStreamName = action.getName();
+ // For the initial partition query (no partition token) we format the timestamp with
+ // nanosecond precision, which is more closely aligned with the production client code.
+
+ String startTime =
+ timestampToString(
+ !action.hasPartitionToken(), Timestamps.toMicros(action.getStartTime()));
+ String endTime = "null";
+ if (action.hasEndTime()) {
+ endTime =
+ timestampToString(
+ !action.hasPartitionToken(), Timestamps.toMicros(action.getEndTime()));
+ }
+ String heartbeat = "null";
+ if (action.hasHeartbeatMilliseconds()) {
+ heartbeat = Integer.toString(action.getHeartbeatMilliseconds());
+ }
+ String partitionToken = "null";
+ if (action.hasPartitionToken()) {
+ partitionToken = String.format("\"%s\"", action.getPartitionToken());
+ }
+
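+ // Build the change stream TVF query. Illustrative example (hypothetical stream name and
+ // values): SELECT * FROM READ_MyStream("2022-09-27T12:00:00.123456789Z", null, null, 10000);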
+ String tvfQuery =
+ String.format(
+ "SELECT * FROM READ_%s(%s,%s,%s,%s);",
+ changeStreamName, startTime, endTime, partitionToken, heartbeat);
+
+ LOGGER.log(Level.INFO, String.format("Start executing change stream TVF: \n%s", tvfQuery));
+ sender.initForChangeStreamQuery(
+ action.getHeartbeatMilliseconds(), action.getName(), action.getPartitionToken());
+ Spanner spannerClient;
+ if (action.hasDeadlineSeconds()) {
+ spannerClient = getClientWithTimeout(action.getDeadlineSeconds());
+ } else {
+ spannerClient = getClient();
+ }
+ DatabaseClient dbClient = spannerClient.getDatabaseClient(DatabaseId.of(dbPath));
+ ResultSet resultSet = dbClient.singleUse().executeQuery(Statement.of(tvfQuery));
+
+ ChangeStreamRecord.Builder changeStreamRecordBuilder = ChangeStreamRecord.newBuilder();
+ while (resultSet.next()) {
+ Struct record = resultSet.getStructList(0).get(0);
+ for (Struct dataChangeRecord : record.getStructList("data_change_record")) {
+ // If the data change record is null, that means the ChangeRecord is either a heartbeat
+ // or a child partitions record.
+ if (dataChangeRecord.isNull(0)) {
+ continue;
+ }
+ DataChangeRecord builtDataChangeRecord = buildDataChangeRecord(dataChangeRecord);
+ changeStreamRecordBuilder.setDataChange(builtDataChangeRecord);
+ }
+ for (Struct heartbeatRecord : record.getStructList("heartbeat_record")) {
+ // If the heartbeat record is null, that means the ChangeRecord is either a data change
+ // record or a child partitions record.
+ if (heartbeatRecord.isNull(0)) {
+ continue;
+ }
+ HeartbeatRecord builtHeartbeatRecord = buildHeartbeatRecord(heartbeatRecord);
+ changeStreamRecordBuilder.setHeartbeat(builtHeartbeatRecord);
+ }
+ for (Struct childPartitionRecord : record.getStructList("child_partitions_record")) {
+ // If the child partitions record is null, that means the ChangeRecord is either a
+ // data change record or a heartbeat record.
+ if (childPartitionRecord.isNull(0)) {
+ continue;
+ }
+ ChildPartitionsRecord builtChildPartitionsRecord =
+ buildChildPartitionRecord(childPartitionRecord);
+ changeStreamRecordBuilder.setChildPartition(builtChildPartitionsRecord);
+ }
+ // For partitioned queries, validate that the time between received change records is
+ // less than 10x the heartbeat interval.
+ // Right now, we are not failing the handler since there are issues other than change
+ // stream related ones that can cause the heartbeat check to fail (e.g. RPC latency).
+ if (sender.getIsPartitionedChangeStreamQuery()) {
+ long lastReceivedTimestamp = sender.getChangeStreamRecordReceivedTimestamp();
+ long currentChangeRecordReceivedTimestamp = System.currentTimeMillis();
+ long discrepancyMillis = currentChangeRecordReceivedTimestamp - lastReceivedTimestamp;
+ // Only do the heartbeat check after we have already received one record for the query
+ // (i.e. lastReceivedTimestamp > 0).
+ // We should only check the heartbeat interval if heartbeat is greater than 5 seconds,
+ // to prevent flaky failures.
+ if (lastReceivedTimestamp > 0
+ && discrepancyMillis > sender.getChangeStreamHeartbeatMilliSeconds() * 10
+ && sender.getChangeStreamHeartbeatMilliSeconds() > 5000) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INTERNAL,
+ "Does not pass the heartbeat interval check. The last record was received seconds"
+ + discrepancyMillis / 1000
+ + " ago, which is more than ten times the heartbeat interval, which is "
+ + sender.getChangeStreamHeartbeatMilliSeconds() / 1000
+ + " seconds. The change record received is: "
+ + changeStreamRecordBuilder.build());
+ }
+ sender.updateChangeStreamRecordReceivedTimestamp(currentChangeRecordReceivedTimestamp);
+ }
+ Status appendStatus = sender.appendChangeStreamRecord(changeStreamRecordBuilder.build());
+ if (!appendStatus.isOk()) {
+ return appendStatus;
+ }
+ }
+ return sender.finishWithOK();
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ LOGGER.log(Level.WARNING, "Unexpected error: " + e.getMessage());
+ if (e instanceof DeadlineExceededException) {
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.DEADLINE_EXCEEDED, "Deadline exceeded error: " + e)));
+ } else if (e instanceof UnavailableException) {
+ return toStatus(
+ SpannerExceptionFactory.newSpannerException(ErrorCode.UNAVAILABLE, e.getMessage()));
+ }
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e)));
+ }
+ }
+
+ /**
+ * Execute action that starts a read-write or read-only transaction. For read-write
+ * transactions, see {@link ReadWriteTransaction}.
+ */
+ private Status executeStartTxn(
+ StartTransactionAction action,
+ DatabaseClient dbClient,
+ OutcomeSender sender,
+ ExecutionFlowContext executionContext) {
+ try {
+ executionContext.updateTransactionSeed(action.getTransactionSeed());
+ Metadata metadata = new Metadata(action.getTableList());
+ if (action.hasConcurrency()) {
+ LOGGER.log(
+ Level.INFO,
+ String.format(
+ "Starting read-only transaction %s\n", executionContext.getTransactionSeed()));
+ executionContext.startReadOnlyTxn(
+ dbClient, timestampBoundsFromConcurrency(action.getConcurrency()), metadata);
+ } else {
+ LOGGER.log(
+ Level.INFO,
+ String.format(
+ "Starting read-write transaction %s\n", executionContext.getTransactionSeed()));
+ executionContext.startReadWriteTxn(dbClient, metadata);
+ }
+ executionContext.setDatabaseClient(dbClient);
+ executionContext.initReadState();
+ return sender.finishWithOK();
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ } catch (Exception e) {
+ return sender.finishWithError(
+ toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error: " + e.getMessage())));
+ }
+ }
+
+ /**
+ * Execute action that finishes a transaction. For a read-write transaction, either committing
+ * or abandoning the transaction is allowed. Batch transactions are not supported.
+ */
+ private Status executeFinishTxn(
+ FinishTransactionAction action, OutcomeSender sender, ExecutionFlowContext executionContext) {
+ LOGGER.log(
+ Level.INFO,
+ String.format(
+ "Finishing transaction %s\n%s", executionContext.getTransactionSeed(), action));
+ return executionContext.finish(action.getMode(), sender);
+ }
+
+ /** Execute mutation action request and buffer the mutations. */
+ private Status executeMutation(
+ MutationAction action,
+ OutcomeSender sender,
+ ExecutionFlowContext executionContext,
+ boolean isWrite) {
+ String prevTable = "";
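+ // A mod may omit the table name, in which case the table from the previous mod is reused.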
+ try {
+ for (int i = 0; i < action.getModCount(); ++i) {
+ Mod mod = action.getMod(i);
+ String table = mod.getTable();
+ if (table.isEmpty()) {
+ table = prevTable;
+ }
+ if (table.isEmpty()) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Table name missing: " + action);
+ }
+ prevTable = table;
+ LOGGER.log(Level.FINE, String.format("Executing mutation mod: \n%s", mod));
+
+ final List<Mutation> mutations = Lists.newArrayList();
+
+ if (mod.hasInsert()) {
+ InsertArgs insertArgs = mod.getInsert();
+ for (int j = 0; j < insertArgs.getValuesCount(); ++j) {
+ mutations.add(
+ buildWrite(
+ insertArgs.getColumnList(),
+ cloudValuesFromValueList(insertArgs.getValues(j), insertArgs.getTypeList()),
+ Mutation.newInsertBuilder(table)));
+ }
+ } else if (mod.hasUpdate()) {
+ UpdateArgs updateArgs = mod.getUpdate();
+ for (int j = 0; j < updateArgs.getValuesCount(); ++j) {
+ mutations.add(
+ buildWrite(
+ updateArgs.getColumnList(),
+ cloudValuesFromValueList(updateArgs.getValues(j), updateArgs.getTypeList()),
+ Mutation.newUpdateBuilder(table)));
+ }
+ } else if (mod.hasInsertOrUpdate()) {
+ InsertArgs insertArgs = mod.getInsertOrUpdate();
+ for (int j = 0; j < insertArgs.getValuesCount(); ++j) {
+ mutations.add(
+ buildWrite(
+ insertArgs.getColumnList(),
+ cloudValuesFromValueList(insertArgs.getValues(j), insertArgs.getTypeList()),
+ Mutation.newInsertOrUpdateBuilder(table)));
+ }
+ } else if (mod.hasReplace()) {
+ InsertArgs insertArgs = mod.getReplace();
+ for (int j = 0; j < insertArgs.getValuesCount(); ++j) {
+ mutations.add(
+ buildWrite(
+ insertArgs.getColumnList(),
+ cloudValuesFromValueList(insertArgs.getValues(j), insertArgs.getTypeList()),
+ Mutation.newReplaceBuilder(table)));
+ }
+ } else if (mod.hasDeleteKeys()) {
+ KeySet keySet =
+ keySetProtoToCloudKeySet(
+ mod.getDeleteKeys(), executionContext.getKeyColumnTypes(table));
+ mutations.add(Mutation.delete(table, keySet));
+ } else {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unsupported mod: " + mod);
+ }
+ if (!isWrite) {
+ executionContext.bufferMutations(mutations);
+ } else {
+ executionContext.getDbClient().write(mutations);
+ }
+ }
+ return sender.finishWithOK();
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ }
+ }
+
+ /** Build a Mutation by using the given WriteBuilder to set the columns for the action. */
+ private Mutation buildWrite(
+ List<String> columnList, List<com.google.cloud.spanner.Value> valueList, WriteBuilder write) {
+ Preconditions.checkState(columnList.size() == valueList.size());
+ for (int i = 0; i < columnList.size(); i++) {
+ write.set(columnList.get(i)).to(valueList.get(i));
+ }
+ return write.build();
+ }
+
+ /** Execute a read action request, store the results in the OutcomeSender. */
+ private Status executeRead(
+ ReadAction action, OutcomeSender sender, ExecutionFlowContext executionContext) {
+ try {
+ LOGGER.log(
+ Level.INFO,
+ String.format("Executing read %s\n%s\n", executionContext.getTransactionSeed(), action));
+ List<com.google.spanner.v1.Type> typeList = new ArrayList<>();
+ if (action.hasIndex()) {
+ // For an index read, we assume the key columns are listed at the front of the read column
+ // list.
+ for (int i = 0; i < action.getColumnCount(); ++i) {
+ String col = action.getColumn(i);
+ typeList.add(executionContext.getColumnType(action.getTable(), col));
+ }
+ } else {
+ typeList = executionContext.getKeyColumnTypes(action.getTable());
+ }
+ KeySet keySet = keySetProtoToCloudKeySet(action.getKeys(), typeList);
+ ReadContext txn = executionContext.getTransactionForRead();
+ sender.initForRead(action.getTable(), action.getIndex());
+
+ executionContext.startRead();
+ LOGGER.log(
+ Level.INFO,
+ String.format(
+ "Finish read building, ready to execute %s\n",
+ executionContext.getTransactionSeed()));
+ ResultSet result;
+ if (action.hasIndex()) {
+ result =
+ txn.readUsingIndex(
+ action.getTable(), action.getIndex(), keySet, action.getColumnList());
+ } else {
+ result = txn.read(action.getTable(), keySet, action.getColumnList());
+ }
+ LOGGER.log(
+ Level.INFO,
+ String.format("Parsing read result %s\n", executionContext.getTransactionSeed()));
+ return processResults(result, action.getLimit(), sender, executionContext);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ }
+ }
+
+ /** Execute a query action request, store the results in the OutcomeSender. */
+ private Status executeQuery(
+ QueryAction action, OutcomeSender sender, ExecutionFlowContext executionContext) {
+ try {
+ LOGGER.log(
+ Level.INFO,
+ String.format("Executing query %s\n%s\n", executionContext.getTransactionSeed(), action));
+ ReadContext txn = executionContext.getTransactionForRead();
+ sender.initForQuery();
+
+ Statement.Builder stmt = Statement.newBuilder(action.getSql());
+ for (int i = 0; i < action.getParamsCount(); ++i) {
+ stmt.bind(action.getParams(i).getName())
+ .to(
+ valueProtoToCloudValue(
+ action.getParams(i).getType(), action.getParams(i).getValue()));
+ }
+
+ executionContext.startRead();
+ LOGGER.log(
+ Level.INFO,
+ String.format(
+ "Finish query building, ready to execute %s\n",
+ executionContext.getTransactionSeed()));
+ ResultSet result = txn.executeQuery(stmt.build(), Options.tag("query-tag"));
+ LOGGER.log(
+ Level.INFO,
+ String.format("Parsing query result %s\n", executionContext.getTransactionSeed()));
+ return processResults(result, 0, sender, executionContext);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ }
+ }
+
+ /** Execute a dml update action request, store the results in the OutcomeSender. */
+ private Status executeCloudDmlUpdate(
+ DmlAction action, OutcomeSender sender, ExecutionFlowContext executionContext) {
+ try {
+ LOGGER.log(
+ Level.INFO,
+ String.format(
+ "Executing Dml update %s\n%s\n", executionContext.getTransactionSeed(), action));
+ QueryAction update = action.getUpdate();
+ Statement.Builder stmt = Statement.newBuilder(update.getSql());
+ for (int i = 0; i < update.getParamsCount(); ++i) {
+ stmt.bind(update.getParams(i).getName())
+ .to(
+ valueProtoToCloudValue(
+ update.getParams(i).getType(), update.getParams(i).getValue()));
+ }
+ sender.initForQuery();
+ ResultSet result =
+ executionContext
+ .getTransactionForWrite()
+ .executeQuery(stmt.build(), Options.tag("dml-transaction-tag"));
+ LOGGER.log(
+ Level.INFO,
+ String.format("Parsing Dml result %s\n", executionContext.getTransactionSeed()));
+ return processResults(result, 0, sender, executionContext, true);
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ }
+ }
+
+ /** Execute a BatchDml update action request, store the results in the OutcomeSender. */
+ private Status executeCloudBatchDmlUpdates(
+ BatchDmlAction action, OutcomeSender sender, ExecutionFlowContext executionContext) {
+ try {
+ List<Statement> queries = new ArrayList<>();
+ for (int i = 0; i < action.getUpdatesCount(); ++i) {
+ LOGGER.log(
+ Level.INFO,
+ String.format(
+ "Executing BatchDml update [%d] %s\n%s\n",
+ i + 1, executionContext.getTransactionSeed(), action));
+ QueryAction update = action.getUpdates(i);
+ Statement.Builder stmt = Statement.newBuilder(update.getSql());
+ for (int j = 0; j < update.getParamsCount(); ++j) {
+ stmt.bind(update.getParams(j).getName())
+ .to(
+ valueProtoToCloudValue(
+ update.getParams(j).getType(), update.getParams(j).getValue()));
+ }
+ queries.add(stmt.build());
+ }
+ long[] rowCounts = executionContext.executeBatchDml(queries);
+ sender.initForQuery();
+ for (long rowCount : rowCounts) {
+ sender.appendRowsModifiedInDml(rowCount);
+ }
+ // If the batchDml request failed, `rowCounts` only contains the rows modified by the DML
+ // statements that succeeded. In that case, add 0 as the row count for the last executed
+ // DML in the batch (the one that failed).
+ if (rowCounts.length != queries.size()) {
+ sender.appendRowsModifiedInDml(0L);
+ }
+ return sender.finishWithOK();
+ } catch (SpannerException e) {
+ return sender.finishWithError(toStatus(e));
+ }
+ }
+
+ /** Process a ResultSet from a read/query and store the results in the OutcomeSender. */
+ private Status processResults(
+ ResultSet results, int limit, OutcomeSender sender, ExecutionFlowContext executionContext) {
+ return processResults(results, limit, sender, executionContext, false);
+ }
+
+ /** Process a ResultSet from a read/query/dml and store the results in the OutcomeSender. */
+ private Status processResults(
+ ResultSet results,
+ int limit,
+ OutcomeSender sender,
+ ExecutionFlowContext executionContext,
+ boolean isDml) {
+ try {
+ int rowCount = 0;
+ LOGGER.log(
+ Level.INFO,
+ String.format("Iterating result set: %s\n", executionContext.getTransactionSeed()));
+ while (results.next()) {
+ com.google.spanner.executor.v1.ValueList row =
+ buildRow(results.getCurrentRowAsStruct(), sender);
+ Status appendStatus = sender.appendRow(row);
+ if (!appendStatus.isOk()) {
+ return appendStatus;
+ }
+ ++rowCount;
+ if (limit > 0 && rowCount >= limit) {
+ LOGGER.log(Level.INFO, "Stopping at row limit: " + limit);
+ break;
+ }
+ }
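+ // For DML, the exact row count in the result set stats only becomes available once the
+ // result set has been fully consumed above.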
+ if (isDml) {
+ sender.appendRowsModifiedInDml(
+ Objects.requireNonNull(results.getStats()).getRowCountExact());
+ }
+
+ LOGGER.log(
+ Level.INFO,
+ String.format(
+ "Successfully processed result: %s\n", executionContext.getTransactionSeed()));
+ executionContext.finishRead(Status.OK);
+ return sender.finishWithOK();
+ } catch (SpannerException e) {
+ Status status = toStatus(e);
+ LOGGER.log(
+ Level.WARNING,
+ String.format(
+ "Encountered exception: %s %s\n",
+ status.getDescription(), executionContext.getTransactionSeed()));
+ executionContext.finishRead(status);
+ if (status.getCode() == Status.ABORTED.getCode()) {
+ return sender.finishWithTransactionRestarted();
+ } else {
+ if (status.getCode() == Status.UNAUTHENTICATED.getCode()) {
+ try {
+ LOGGER.log(
+ Level.INFO,
+ String.format(
+ "Found Unauthenticated error, client credentials:\n%s",
+ getClient().getOptions().getCredentials().toString()));
+ } catch (Exception exception) {
+ LOGGER.log(Level.WARNING, String.format("Failed to getClient %s", exception));
+ }
+ }
+ return sender.finishWithError(status);
+ }
+ } finally {
+ LOGGER.log(
+ Level.INFO,
+ String.format("Closing result set %s\n", executionContext.getTransactionSeed()));
+ results.close();
+ }
+ }
+
+ /** Convert a result row to a row proto(value list) for sending back to the client. */
+ private com.google.spanner.executor.v1.ValueList buildRow(
+ StructReader result, OutcomeSender sender) throws SpannerException {
+ com.google.spanner.executor.v1.ValueList.Builder rowBuilder =
+ com.google.spanner.executor.v1.ValueList.newBuilder();
+ com.google.spanner.v1.StructType.Builder rowTypeBuilder =
+ com.google.spanner.v1.StructType.newBuilder();
+ for (int i = 0; i < result.getColumnCount(); ++i) {
+ com.google.cloud.spanner.Type columnType = result.getColumnType(i);
+ rowTypeBuilder.addFields(
+ com.google.spanner.v1.StructType.Field.newBuilder()
+ .setName(result.getType().getStructFields().get(i).getName())
+ .setType(cloudTypeToTypeProto(columnType))
+ .build());
+ com.google.spanner.executor.v1.Value.Builder value =
+ com.google.spanner.executor.v1.Value.newBuilder();
+ if (result.isNull(i)) {
+ value.setIsNull(true);
+ } else {
+ switch (columnType.getCode()) {
+ case BOOL:
+ value.setBoolValue(result.getBoolean(i));
+ break;
+ case FLOAT64:
+ value.setDoubleValue(result.getDouble(i));
+ break;
+ case INT64:
+ value.setIntValue(result.getLong(i));
+ break;
+ case STRING:
+ value.setStringValue(result.getString(i));
+ break;
+ case BYTES:
+ value.setBytesValue(toByteString(result.getBytes(i)));
+ break;
+ case TIMESTAMP:
+ value.setTimestampValue(timestampToProto(result.getTimestamp(i)));
+ break;
+ case DATE:
+ value.setDateDaysValue(daysFromDate(result.getDate(i)));
+ break;
+ case NUMERIC:
+ String ascii = result.getBigDecimal(i).toPlainString();
+ value.setStringValue(ascii);
+ break;
+ case JSON:
+ value.setStringValue(result.getJson(i));
+ break;
+ case ARRAY:
+ switch (result.getColumnType(i).getArrayElementType().getCode()) {
+ case BOOL:
+ {
+ com.google.spanner.executor.v1.ValueList.Builder builder =
+ com.google.spanner.executor.v1.ValueList.newBuilder();
+ List<Boolean> values = result.getBooleanList(i);
+ for (Boolean booleanValue : values) {
+ com.google.spanner.executor.v1.Value.Builder valueProto =
+ com.google.spanner.executor.v1.Value.newBuilder();
+ if (booleanValue == null) {
+ builder.addValue(valueProto.setIsNull(true).build());
+ } else {
+ builder.addValue(valueProto.setBoolValue(booleanValue).build());
+ }
+ }
+ value.setArrayValue(builder.build());
+ value.setArrayType(
+ com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.BOOL).build());
+ }
+ break;
+ case FLOAT64:
+ {
+ com.google.spanner.executor.v1.ValueList.Builder builder =
+ com.google.spanner.executor.v1.ValueList.newBuilder();
+ List<Double> values = result.getDoubleList(i);
+ for (Double doubleValue : values) {
+ com.google.spanner.executor.v1.Value.Builder valueProto =
+ com.google.spanner.executor.v1.Value.newBuilder();
+ if (doubleValue == null) {
+ builder.addValue(valueProto.setIsNull(true).build());
+ } else {
+ builder.addValue(valueProto.setDoubleValue(doubleValue).build());
+ }
+ }
+ value.setArrayValue(builder.build());
+ value.setArrayType(
+ com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.FLOAT64).build());
+ }
+ break;
+ case INT64:
+ {
+ com.google.spanner.executor.v1.ValueList.Builder builder =
+ com.google.spanner.executor.v1.ValueList.newBuilder();
+ List<Long> values = result.getLongList(i);
+ for (Long longValue : values) {
+ com.google.spanner.executor.v1.Value.Builder valueProto =
+ com.google.spanner.executor.v1.Value.newBuilder();
+ if (longValue == null) {
+ builder.addValue(valueProto.setIsNull(true).build());
+ } else {
+ builder.addValue(valueProto.setIntValue(longValue).build());
+ }
+ }
+ value.setArrayValue(builder.build());
+ value.setArrayType(
+ com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.INT64).build());
+ }
+ break;
+ case STRING:
+ {
+ com.google.spanner.executor.v1.ValueList.Builder builder =
+ com.google.spanner.executor.v1.ValueList.newBuilder();
+ List<String> values = result.getStringList(i);
+ for (String stringValue : values) {
+ com.google.spanner.executor.v1.Value.Builder valueProto =
+ com.google.spanner.executor.v1.Value.newBuilder();
+ if (stringValue == null) {
+ builder.addValue(valueProto.setIsNull(true).build());
+ } else {
+ builder.addValue(valueProto.setStringValue(stringValue).build());
+ }
+ }
+ value.setArrayValue(builder.build());
+ value.setArrayType(
+ com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.STRING).build());
+ }
+ break;
+ case BYTES:
+ {
+ com.google.spanner.executor.v1.ValueList.Builder builder =
+ com.google.spanner.executor.v1.ValueList.newBuilder();
+ List<ByteArray> values = result.getBytesList(i);
+ for (ByteArray byteArrayValue : values) {
+ com.google.spanner.executor.v1.Value.Builder valueProto =
+ com.google.spanner.executor.v1.Value.newBuilder();
+ if (byteArrayValue == null) {
+ builder.addValue(valueProto.setIsNull(true).build());
+ } else {
+ builder.addValue(
+ valueProto
+ .setBytesValue(ByteString.copyFrom(byteArrayValue.toByteArray()))
+ .build());
+ }
+ }
+ value.setArrayValue(builder.build());
+ value.setArrayType(
+ com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.BYTES).build());
+ }
+ break;
+ case DATE:
+ {
+ com.google.spanner.executor.v1.ValueList.Builder builder =
+ com.google.spanner.executor.v1.ValueList.newBuilder();
+ List<Date> values = result.getDateList(i);
+ for (Date dateValue : values) {
+ com.google.spanner.executor.v1.Value.Builder valueProto =
+ com.google.spanner.executor.v1.Value.newBuilder();
+ if (dateValue == null) {
+ builder.addValue(valueProto.setIsNull(true).build());
+ } else {
+ builder.addValue(
+ valueProto.setDateDaysValue(daysFromDate(dateValue)).build());
+ }
+ }
+ value.setArrayValue(builder.build());
+ value.setArrayType(
+ com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.DATE).build());
+ }
+ break;
+ case TIMESTAMP:
+ {
+ com.google.spanner.executor.v1.ValueList.Builder builder =
+ com.google.spanner.executor.v1.ValueList.newBuilder();
+ List<Timestamp> values = result.getTimestampList(i);
+ for (Timestamp timestampValue : values) {
+ com.google.spanner.executor.v1.Value.Builder valueProto =
+ com.google.spanner.executor.v1.Value.newBuilder();
+ if (timestampValue == null) {
+ builder.addValue(valueProto.setIsNull(true).build());
+ } else {
+ builder.addValue(
+ valueProto.setTimestampValue(timestampToProto(timestampValue)).build());
+ }
+ }
+ value.setArrayValue(builder.build());
+ value.setArrayType(
+ com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.TIMESTAMP).build());
+ }
+ break;
+ case NUMERIC:
+ {
+ com.google.spanner.executor.v1.ValueList.Builder builder =
+ com.google.spanner.executor.v1.ValueList.newBuilder();
+ List<BigDecimal> values = result.getBigDecimalList(i);
+ for (BigDecimal bigDec : values) {
+ com.google.spanner.executor.v1.Value.Builder valueProto =
+ com.google.spanner.executor.v1.Value.newBuilder();
+ if (bigDec == null) {
+ builder.addValue(valueProto.setIsNull(true).build());
+ } else {
+ builder.addValue(valueProto.setStringValue(bigDec.toPlainString()).build());
+ }
+ }
+ value.setArrayValue(builder.build());
+ value.setArrayType(
+ com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.NUMERIC).build());
+ }
+ break;
+ case JSON:
+ {
+ com.google.spanner.executor.v1.ValueList.Builder builder =
+ com.google.spanner.executor.v1.ValueList.newBuilder();
+ List<String> values = result.getJsonList(i);
+ for (String stringValue : values) {
+ com.google.spanner.executor.v1.Value.Builder valueProto =
+ com.google.spanner.executor.v1.Value.newBuilder();
+ if (stringValue == null) {
+ builder.addValue(valueProto.setIsNull(true).build());
+ } else {
+ builder.addValue(valueProto.setStringValue(stringValue).build());
+ }
+ }
+ value.setArrayValue(builder.build());
+ value.setArrayType(
+ com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.JSON).build());
+ }
+ break;
+ default:
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT,
+ "Unsupported row array type: "
+ + result.getColumnType(i)
+ + " for result type "
+ + result.getType().toString());
+ }
+ break;
+ default:
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT,
+ "Unsupported row type: "
+ + result.getColumnType(i)
+ + " for result type "
+ + result.getType().toString());
+ }
+ }
+ rowBuilder.addValue(value.build());
+ }
+ sender.setRowType(rowTypeBuilder.build());
+ return rowBuilder.build();
+ }
+
+ /** Convert a ValueList proto to a list of cloud Values. */
+ private static List<com.google.cloud.spanner.Value> cloudValuesFromValueList(
+ com.google.spanner.executor.v1.ValueList valueList, List<com.google.spanner.v1.Type> typeList)
+ throws SpannerException {
+ LOGGER.log(Level.INFO, String.format("Converting valueList: %s\n", valueList));
+ Preconditions.checkState(valueList.getValueCount() == typeList.size());
+ List<com.google.cloud.spanner.Value> cloudValues = new ArrayList<>();
+ for (int i = 0; i < valueList.getValueCount(); ++i) {
+ com.google.cloud.spanner.Value value =
+ valueProtoToCloudValue(typeList.get(i), valueList.getValue(i));
+ cloudValues.add(value);
+ }
+ return cloudValues;
+ }
+
+ /** Convert a proto KeySet to a cloud KeySet. */
+ private static com.google.cloud.spanner.KeySet keySetProtoToCloudKeySet(
+ com.google.spanner.executor.v1.KeySet keySetProto, List<com.google.spanner.v1.Type> typeList)
+ throws SpannerException {
+ if (keySetProto.getAll()) {
+ return com.google.cloud.spanner.KeySet.all();
+ }
+ com.google.cloud.spanner.KeySet.Builder cloudKeySetBuilder =
+ com.google.cloud.spanner.KeySet.newBuilder();
+ for (int i = 0; i < keySetProto.getPointCount(); ++i) {
+ cloudKeySetBuilder.addKey(keyProtoToCloudKey(keySetProto.getPoint(i), typeList));
+ }
+ for (int i = 0; i < keySetProto.getRangeCount(); ++i) {
+ cloudKeySetBuilder.addRange(keyRangeProtoToCloudKeyRange(keySetProto.getRange(i), typeList));
+ }
+ return cloudKeySetBuilder.build();
+ }
+
+ /** Convert a keyRange proto to a cloud KeyRange. */
+ private static com.google.cloud.spanner.KeyRange keyRangeProtoToCloudKeyRange(
+ com.google.spanner.executor.v1.KeyRange keyRangeProto,
+ List<com.google.spanner.v1.Type> typeList)
+ throws SpannerException {
+ Key start = keyProtoToCloudKey(keyRangeProto.getStart(), typeList);
+ Key end = keyProtoToCloudKey(keyRangeProto.getLimit(), typeList);
+ if (!keyRangeProto.hasType()) {
+ // default
+ return KeyRange.closedOpen(start, end);
+ }
+ switch (keyRangeProto.getType()) {
+ case CLOSED_CLOSED:
+ return KeyRange.closedClosed(start, end);
+ case CLOSED_OPEN:
+ return KeyRange.closedOpen(start, end);
+ case OPEN_CLOSED:
+ return KeyRange.openClosed(start, end);
+ case OPEN_OPEN:
+ return KeyRange.openOpen(start, end);
+ // Unreachable.
+ default:
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unrecognized serviceKeyFile range type");
+ }
+ }
+
+ /** Convert a key proto (value list) to a cloud Key. */
+ private static com.google.cloud.spanner.Key keyProtoToCloudKey(
+ com.google.spanner.executor.v1.ValueList keyProto, List<com.google.spanner.v1.Type> typeList)
+ throws SpannerException {
+ com.google.cloud.spanner.Key.Builder cloudKey = com.google.cloud.spanner.Key.newBuilder();
+ if (typeList.size() < keyProto.getValueCount()) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT,
+ "There's more serviceKeyFile parts in " + keyProto + " than column types in " + typeList);
+ }
+
+ for (int i = 0; i < keyProto.getValueCount(); ++i) {
+ com.google.spanner.v1.Type type = typeList.get(i);
+ com.google.spanner.executor.v1.Value part = keyProto.getValue(i);
+ if (part.hasIsNull()) {
+ switch (type.getCode()) {
+ case BOOL:
+ case INT64:
+ case STRING:
+ case BYTES:
+ case FLOAT64:
+ case DATE:
+ case TIMESTAMP:
+ case NUMERIC:
+ case JSON:
+ cloudKey.appendObject(null);
+ break;
+ default:
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT,
+ "Unsupported null serviceKeyFile part type: " + type.getCode().name());
+ }
+ } else if (part.hasIntValue()) {
+ cloudKey.append(part.getIntValue());
+ } else if (part.hasBoolValue()) {
+ cloudKey.append(part.getBoolValue());
+ } else if (part.hasDoubleValue()) {
+ cloudKey.append(part.getDoubleValue());
+ } else if (part.hasBytesValue()) {
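+ // A bytes value may encode either a STRING (UTF-8) or a BYTES key part; the declared column
+ // type determines how it is appended.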
+ switch (type.getCode()) {
+ case STRING:
+ cloudKey.append(part.getBytesValue().toStringUtf8());
+ break;
+ case BYTES:
+ cloudKey.append(toByteArray(part.getBytesValue()));
+ break;
+ // Unreachable
+ default:
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT,
+ "Unsupported serviceKeyFile part type: " + type.getCode().name());
+ }
+ } else if (part.hasStringValue()) {
+ if (type.getCode() == TypeCode.NUMERIC) {
+ String ascii = part.getStringValue();
+ cloudKey.append(new BigDecimal(ascii));
+ } else {
+ cloudKey.append(part.getStringValue());
+ }
+ } else if (part.hasTimestampValue()) {
+ cloudKey.append(Timestamp.parseTimestamp(Timestamps.toString(part.getTimestampValue())));
+ } else if (part.hasDateDaysValue()) {
+ cloudKey.append(dateFromDays(part.getDateDaysValue()));
+ } else {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unsupported serviceKeyFile part: " + part);
+ }
+ }
+ return cloudKey.build();
+ }
+
+ /** Convert a Value proto to a cloud Value. */
+ @SuppressWarnings("NullTernary")
+ private static com.google.cloud.spanner.Value valueProtoToCloudValue(
+ com.google.spanner.v1.Type type, com.google.spanner.executor.v1.Value value) {
+ if (value.hasIsCommitTimestamp() && value.getIsCommitTimestamp()) {
+ return Value.timestamp(com.google.cloud.spanner.Value.COMMIT_TIMESTAMP);
+ }
+ switch (type.getCode()) {
+ case INT64:
+ return com.google.cloud.spanner.Value.int64(value.hasIsNull() ? null : value.getIntValue());
+ case FLOAT64:
+ return com.google.cloud.spanner.Value.float64(
+ value.hasIsNull() ? null : value.getDoubleValue());
+ case STRING:
+ return com.google.cloud.spanner.Value.string(
+ value.hasIsNull() ? null : value.getStringValue());
+ case BYTES:
+ return com.google.cloud.spanner.Value.bytes(
+ value.hasIsNull() ? null : ByteArray.copyFrom(value.getBytesValue().toByteArray()));
+ case BOOL:
+ return com.google.cloud.spanner.Value.bool(value.hasIsNull() ? null : value.getBoolValue());
+ case TIMESTAMP:
+ if (value.hasIsNull()) {
+ return com.google.cloud.spanner.Value.timestamp(null);
+ } else {
+ if (!value.hasBytesValue()) {
+ return com.google.cloud.spanner.Value.timestamp(
+ Timestamp.parseTimestamp(Timestamps.toString(value.getTimestampValue())));
+ } else {
+ return com.google.cloud.spanner.Value.timestamp(
+ com.google.cloud.spanner.Value.COMMIT_TIMESTAMP);
+ }
+ }
+ case DATE:
+ return com.google.cloud.spanner.Value.date(
+ value.hasIsNull() ? null : dateFromDays(value.getDateDaysValue()));
+ case NUMERIC:
+ {
+ if (value.hasIsNull()) {
+ return com.google.cloud.spanner.Value.numeric(null);
+ }
+ String ascii = value.getStringValue();
+ return com.google.cloud.spanner.Value.numeric(new BigDecimal(ascii));
+ }
+ case JSON:
+ return com.google.cloud.spanner.Value.json(
+ value.hasIsNull() ? null : value.getStringValue());
+ case STRUCT:
+ return com.google.cloud.spanner.Value.struct(
+ typeProtoToCloudType(type),
+ value.hasIsNull() ? null : structProtoToCloudStruct(type, value.getStructValue()));
+ case ARRAY:
+ switch (type.getArrayElementType().getCode()) {
+ case INT64:
+ if (value.hasIsNull()) {
+ return com.google.cloud.spanner.Value.int64Array((Iterable<Long>) null);
+ } else {
+ return com.google.cloud.spanner.Value.int64Array(
+ unmarshallValueList(
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getIsNull)
+ .collect(Collectors.toList()),
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getIntValue)
+ .collect(Collectors.toList())));
+ }
+ case FLOAT64:
+ if (value.hasIsNull()) {
+ return com.google.cloud.spanner.Value.float64Array((Iterable<Double>) null);
+ } else {
+ return com.google.cloud.spanner.Value.float64Array(
+ unmarshallValueList(
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getIsNull)
+ .collect(Collectors.toList()),
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getDoubleValue)
+ .collect(Collectors.toList())));
+ }
+ case STRING:
+ if (value.hasIsNull()) {
+ return com.google.cloud.spanner.Value.stringArray(null);
+ } else {
+ return com.google.cloud.spanner.Value.stringArray(
+ unmarshallValueList(
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getIsNull)
+ .collect(Collectors.toList()),
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getStringValue)
+ .collect(Collectors.toList())));
+ }
+ case BYTES:
+ if (value.hasIsNull()) {
+ return com.google.cloud.spanner.Value.bytesArray(null);
+ } else {
+ return com.google.cloud.spanner.Value.bytesArray(
+ unmarshallValueList(
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getIsNull)
+ .collect(Collectors.toList()),
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getBytesValue)
+ .collect(Collectors.toList()),
+ element -> ByteArray.copyFrom(element.toByteArray())));
+ }
+ case BOOL:
+ if (value.hasIsNull()) {
+ return com.google.cloud.spanner.Value.boolArray((Iterable<Boolean>) null);
+ } else {
+ return com.google.cloud.spanner.Value.boolArray(
+ unmarshallValueList(
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getIsNull)
+ .collect(Collectors.toList()),
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getBoolValue)
+ .collect(Collectors.toList())));
+ }
+ case TIMESTAMP:
+ if (value.hasIsNull()) {
+ return com.google.cloud.spanner.Value.timestampArray(null);
+ } else {
+ return com.google.cloud.spanner.Value.timestampArray(
+ unmarshallValueList(
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getIsNull)
+ .collect(Collectors.toList()),
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getTimestampValue)
+ .collect(Collectors.toList()),
+ element -> Timestamp.parseTimestamp(Timestamps.toString(element))));
+ }
+ case DATE:
+ if (value.hasIsNull()) {
+ return com.google.cloud.spanner.Value.dateArray(null);
+ } else {
+ return com.google.cloud.spanner.Value.dateArray(
+ unmarshallValueList(
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getIsNull)
+ .collect(Collectors.toList()),
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getDateDaysValue)
+ .collect(Collectors.toList()),
+ CloudClientExecutor::dateFromDays));
+ }
+ case NUMERIC:
+ {
+ if (value.hasIsNull()) {
+ return com.google.cloud.spanner.Value.numericArray(null);
+ }
+ List<Boolean> nullList =
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getIsNull)
+ .collect(Collectors.toList());
+ List<String> valueList =
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getStringValue)
+ .collect(Collectors.toList());
+ List<BigDecimal> newValueList = new ArrayList<>(valueList.size());
+
+ for (int i = 0; i < valueList.size(); ++i) {
+ if (i < nullList.size() && nullList.get(i)) {
+ newValueList.add(null);
+ continue;
+ }
+ String ascii = valueList.get(i);
+ newValueList.add(new BigDecimal(ascii));
+ }
+ return com.google.cloud.spanner.Value.numericArray(newValueList);
+ }
+ case STRUCT:
+ com.google.cloud.spanner.Type elementType =
+ typeProtoToCloudType(type.getArrayElementType());
+ if (value.hasIsNull()) {
+ return com.google.cloud.spanner.Value.structArray(elementType, null);
+ } else {
+ return com.google.cloud.spanner.Value.structArray(
+ elementType,
+ unmarshallValueList(
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getIsNull)
+ .collect(Collectors.toList()),
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getStructValue)
+ .collect(Collectors.toList()),
+ element -> structProtoToCloudStruct(type.getArrayElementType(), element)));
+ }
+ case JSON:
+ if (value.hasIsNull()) {
+ return com.google.cloud.spanner.Value.jsonArray(null);
+ } else {
+ return com.google.cloud.spanner.Value.jsonArray(
+ unmarshallValueList(
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getIsNull)
+ .collect(Collectors.toList()),
+ value.getArrayValue().getValueList().stream()
+ .map(com.google.spanner.executor.v1.Value::getStringValue)
+ .collect(Collectors.toList())));
+ }
+ default:
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT,
+ "Unsupported array element type while converting from value proto: "
+ + type.getArrayElementType().getCode().name());
+ }
+ }
+ // Unreachable
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unsupported type while converting from value proto: " + type);
+ }
+
+ /** Convert a cloud Timestamp to a Timestamp proto. */
+ private com.google.protobuf.Timestamp timestampToProto(Timestamp t) throws SpannerException {
+ try {
+ return Timestamps.parse(t.toString());
+ } catch (ParseException e) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Timestamp parse error", e);
+ }
+ }
+
+ /** Convert a cloud Date to its proto representation (days since epoch). */
+ private static int daysFromDate(Date date) {
+ return (int) LocalDate.of(date.getYear(), date.getMonth(), date.getDayOfMonth()).toEpochDay();
+ }
+
+ /** Convert a proto date (days since epoch) to a cloud Date. */
+ private static Date dateFromDays(int daysSinceEpoch) {
+ LocalDate localDate = LocalDate.ofEpochDay(daysSinceEpoch);
+ return Date.fromYearMonthDay(
+ localDate.getYear(), localDate.getMonthValue(), localDate.getDayOfMonth());
+ }
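+ // Sketch of the round trip between the two helpers above:
+ // daysFromDate(Date.fromYearMonthDay(2022, 1, 1)) yields 18993 (days since 1970-01-01),
+ // and dateFromDays(18993) yields that same date back.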
+
+ @Nullable
+ private static ByteString toByteString(@Nullable ByteArray byteArray) {
+ if (byteArray == null) {
+ return null;
+ }
+ return ByteString.copyFrom(byteArray.toByteArray());
+ }
+
+ @Nullable
+ private static ByteArray toByteArray(@Nullable ByteString byteString) {
+ if (byteString == null) {
+ return null;
+ }
+ return ByteArray.copyFrom(byteString.toByteArray());
+ }
+
+ /** Convert a list of nullable values to another type. */
+ private static <T, S> List<T> unmarshallValueList(
+ List<Boolean> isNullList, List<S> valueList, Function<S, T> converter) {
+ List newValueList = new ArrayList<>(valueList.size());
+ if (isNullList.isEmpty()) {
+ for (S value : valueList) {
+ newValueList.add(converter.apply(value));
+ }
+ } else {
+ for (int i = 0; i < valueList.size(); ++i) {
+ newValueList.add(isNullList.get(i) ? null : converter.apply(valueList.get(i)));
+ }
+ }
+ return newValueList;
+ }
+
+ /** Insert null into valueList according to isNullList. */
+ private static <T> List<T> unmarshallValueList(List<Boolean> isNullList, List<T> valueList) {
+ return unmarshallValueList(isNullList, valueList, element -> element);
+ }
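+ // A minimal sketch of the null handling:
+ // unmarshallValueList(Arrays.asList(false, true, false), Arrays.asList(1L, 2L, 3L))
+ // yields [1, null, 3]; the overload above is simply the identity-converter case of the
+ // three-argument version.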
+
+ /** Convert a Struct proto to a cloud Struct. */
+ private static com.google.cloud.spanner.Struct structProtoToCloudStruct(
+ com.google.spanner.v1.Type type, com.google.spanner.executor.v1.ValueList structValue) {
+ List<com.google.spanner.executor.v1.Value> fieldValues = structValue.getValueList();
+ List<StructType.Field> fieldTypes = type.getStructType().getFieldsList();
+
+ if (fieldTypes.size() != fieldValues.size()) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT,
+ "Mismatch between number of expected fields and specified values for struct type");
+ }
+
+ com.google.cloud.spanner.Struct.Builder builder = com.google.cloud.spanner.Struct.newBuilder();
+ for (int i = 0; i < fieldTypes.size(); ++i) {
+ builder
+ .set(fieldTypes.get(i).getName())
+ .to(valueProtoToCloudValue(fieldTypes.get(i).getType(), fieldValues.get(i)));
+ }
+ return builder.build();
+ }
+
+ /** Convert a Type proto to a cloud Type. */
+ private static com.google.cloud.spanner.Type typeProtoToCloudType(
+ com.google.spanner.v1.Type typeProto) {
+ switch (typeProto.getCode()) {
+ case BOOL:
+ return com.google.cloud.spanner.Type.bool();
+ case INT64:
+ return com.google.cloud.spanner.Type.int64();
+ case STRING:
+ return com.google.cloud.spanner.Type.string();
+ case BYTES:
+ return com.google.cloud.spanner.Type.bytes();
+ case FLOAT64:
+ return com.google.cloud.spanner.Type.float64();
+ case DATE:
+ return com.google.cloud.spanner.Type.date();
+ case TIMESTAMP:
+ return com.google.cloud.spanner.Type.timestamp();
+ case NUMERIC:
+ if (typeProto.getTypeAnnotation().equals(TypeAnnotationCode.PG_NUMERIC)) {
+ return com.google.cloud.spanner.Type.pgNumeric();
+ } else {
+ return com.google.cloud.spanner.Type.numeric();
+ }
+ case STRUCT:
+ List<StructType.Field> fields = typeProto.getStructType().getFieldsList();
+ List<com.google.cloud.spanner.Type.StructField> cloudFields = new ArrayList<>();
+ for (StructType.Field field : fields) {
+ com.google.cloud.spanner.Type fieldType = typeProtoToCloudType(field.getType());
+ cloudFields.add(com.google.cloud.spanner.Type.StructField.of(field.getName(), fieldType));
+ }
+ return com.google.cloud.spanner.Type.struct(cloudFields);
+ case ARRAY:
+ com.google.spanner.v1.Type elementType = typeProto.getArrayElementType();
+ if (elementType.getCode() == TypeCode.ARRAY) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unsupported array-of-array proto type");
+ } else {
+ com.google.cloud.spanner.Type cloudElementType = typeProtoToCloudType(elementType);
+ return com.google.cloud.spanner.Type.array(cloudElementType);
+ }
+ case JSON:
+ if (typeProto.getTypeAnnotation().equals(TypeAnnotationCode.PG_JSONB)) {
+ return com.google.cloud.spanner.Type.pgJsonb();
+ } else {
+ return com.google.cloud.spanner.Type.json();
+ }
+ default:
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unsupported proto type: " + typeProto);
+ }
+ }
+
+ /** Convert a cloud Type to a Type proto. */
+ private static com.google.spanner.v1.Type cloudTypeToTypeProto(@NotNull Type cloudTypeProto) {
+ switch (cloudTypeProto.getCode()) {
+ case BOOL:
+ return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.BOOL).build();
+ case INT64:
+ return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.INT64).build();
+ case FLOAT64:
+ return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.FLOAT64).build();
+ case STRING:
+ return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.STRING).build();
+ case BYTES:
+ return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.BYTES).build();
+ case TIMESTAMP:
+ return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.TIMESTAMP).build();
+ case DATE:
+ return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.DATE).build();
+ case NUMERIC:
+ return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.NUMERIC).build();
+ case PG_NUMERIC:
+ return com.google.spanner.v1.Type.newBuilder()
+ .setCode(TypeCode.NUMERIC)
+ .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC)
+ .build();
+ case STRUCT:
+ com.google.spanner.v1.StructType.Builder structDescriptorBuilder =
+ com.google.spanner.v1.StructType.newBuilder();
+ for (com.google.cloud.spanner.Type.StructField cloudField :
+ cloudTypeProto.getStructFields()) {
+ structDescriptorBuilder.addFields(
+ com.google.spanner.v1.StructType.Field.newBuilder()
+ .setName(cloudField.getName())
+ .setType(cloudTypeToTypeProto(cloudField.getType())));
+ }
+ return com.google.spanner.v1.Type.newBuilder()
+ .setCode(TypeCode.STRUCT)
+ .setStructType(structDescriptorBuilder.build())
+ .build();
+ case ARRAY:
+ if (cloudTypeProto.getArrayElementType().getCode()
+ == com.google.cloud.spanner.Type.Code.ARRAY) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unsupported array-of-array cloud type");
+ } else {
+ return com.google.spanner.v1.Type.newBuilder()
+ .setCode(TypeCode.ARRAY)
+ .setArrayElementType(cloudTypeToTypeProto(cloudTypeProto.getArrayElementType()))
+ .build();
+ }
+ case JSON:
+ return com.google.spanner.v1.Type.newBuilder().setCode(TypeCode.JSON).build();
+ case PG_JSONB:
+ return com.google.spanner.v1.Type.newBuilder()
+ .setCode(TypeCode.JSON)
+ .setTypeAnnotation(TypeAnnotationCode.PG_JSONB)
+ .build();
+ default:
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unsupported cloud type: " + cloudTypeProto);
+ }
+ }
+
+ /** Unmarshall ByteString to serializable object. */
+ private <T> T unmarshall(ByteString input)
+ throws IOException, ClassNotFoundException {
+ ObjectInputStream objectInputStream = new ObjectInputStream(input.newInput());
+ return (T) objectInputStream.readObject();
+ }
+
+ /** Marshall a serializable object into ByteString. */
+ private <T> ByteString marshall(T object) throws IOException {
+ ByteString.Output output = ByteString.newOutput();
+ ObjectOutputStream objectOutputStream = new ObjectOutputStream(output);
+ objectOutputStream.writeObject(object);
+ objectOutputStream.flush();
+ objectOutputStream.close();
+ return output.toByteString();
+ }
+
+ /** Build Timestamp from micros. */
+ private Timestamp timestampFromMicros(long micros) {
+ long seconds = TimeUnit.MICROSECONDS.toSeconds(micros);
+ int nanos = (int) (micros * 1000 - seconds * 1000000000);
+ return Timestamp.ofTimeSecondsAndNanos(seconds, nanos);
+ }
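+ // For example, timestampFromMicros(1_500_000) computes seconds = 1 and
+ // nanos = 1_500_000 * 1000 - 1 * 1_000_000_000 = 500_000_000, i.e. 1.5 seconds after the epoch.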
+
+ /** Build TimestampBound from Concurrency. */
+ private TimestampBound timestampBoundsFromConcurrency(Concurrency concurrency) {
+ if (concurrency.hasStalenessSeconds()) {
+ return TimestampBound.ofExactStaleness(
+ (long) (concurrency.getStalenessSeconds() * 1000000), TimeUnit.MICROSECONDS);
+ } else if (concurrency.hasMinReadTimestampMicros()) {
+ return TimestampBound.ofMinReadTimestamp(
+ timestampFromMicros(concurrency.getMinReadTimestampMicros()));
+ } else if (concurrency.hasMaxStalenessSeconds()) {
+ return TimestampBound.ofMaxStaleness(
+ (long) (concurrency.getMaxStalenessSeconds() * 1000000), TimeUnit.MICROSECONDS);
+ } else if (concurrency.hasExactTimestampMicros()) {
+ return TimestampBound.ofReadTimestamp(
+ timestampFromMicros(concurrency.getExactTimestampMicros()));
+ } else if (concurrency.hasStrong()) {
+ return TimestampBound.strong();
+ } else if (concurrency.hasBatch()) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT,
+ "batch mode should not be in snapshot transaction: " + concurrency);
+ }
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unsupported concurrency mode: " + concurrency);
+ }
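+ // A minimal sketch of the mapping, assuming the proto staleness fields are expressed in seconds:
+ // a Concurrency with staleness_seconds = 1.5 maps to
+ //   TimestampBound.ofExactStaleness(1_500_000, TimeUnit.MICROSECONDS)
+ // and one with exact_timestamp_micros = 1_000_000 maps to
+ //   TimestampBound.ofReadTimestamp(Timestamp.ofTimeSecondsAndNanos(1, 0)).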
+
+ /** Build instance proto from cloud spanner instance. */
+ private com.google.spanner.admin.instance.v1.Instance instanceToProto(Instance instance) {
+ com.google.spanner.admin.instance.v1.Instance.Builder instanceBuilder =
+ com.google.spanner.admin.instance.v1.Instance.newBuilder();
+ instanceBuilder
+ .setConfig(instance.getInstanceConfigId().getInstanceConfig())
+ .setName(instance.getId().getName())
+ .setDisplayName(instance.getDisplayName())
+ .setCreateTime(instance.getCreateTime().toProto())
+ .setNodeCount(instance.getNodeCount())
+ .setProcessingUnits(instance.getProcessingUnits())
+ .setUpdateTime(instance.getUpdateTime().toProto())
+ .putAllLabels(instance.getLabels());
+ com.google.spanner.admin.instance.v1.Instance.State state;
+ switch (instance.getState()) {
+ case UNSPECIFIED:
+ state = State.STATE_UNSPECIFIED;
+ break;
+ case CREATING:
+ state = State.CREATING;
+ break;
+ case READY:
+ state = State.READY;
+ break;
+ default:
+ throw new IllegalArgumentException("Unknown state:" + instance.getState());
+ }
+ instanceBuilder.setState(state);
+ return instanceBuilder.build();
+ }
+
+ /** Build instance config proto from cloud spanner instance config. */
+ private com.google.spanner.admin.instance.v1.InstanceConfig instanceConfigToProto(
+ InstanceConfig instanceConfig) {
+ com.google.spanner.admin.instance.v1.InstanceConfig.Builder instanceConfigBuilder =
+ com.google.spanner.admin.instance.v1.InstanceConfig.newBuilder();
+ instanceConfigBuilder
+ .setDisplayName(instanceConfig.getDisplayName())
+ .setEtag(instanceConfig.getEtag())
+ .setName(instanceConfig.getId().getName())
+ .addAllLeaderOptions(instanceConfig.getLeaderOptions())
+ .addAllOptionalReplicas(
+ instanceConfig.getOptionalReplicas().stream()
+ .map(ReplicaInfo::getProto)
+ .collect(Collectors.toList()))
+ .addAllReplicas(
+ instanceConfig.getReplicas().stream()
+ .map(ReplicaInfo::getProto)
+ .collect(Collectors.toList()))
+ .putAllLabels(instanceConfig.getLabels())
+ .setReconciling(instanceConfig.getReconciling());
+ com.google.spanner.admin.instance.v1.InstanceConfig.State state;
+ switch (instanceConfig.getState()) {
+ case STATE_UNSPECIFIED:
+ state = com.google.spanner.admin.instance.v1.InstanceConfig.State.STATE_UNSPECIFIED;
+ break;
+ case CREATING:
+ state = com.google.spanner.admin.instance.v1.InstanceConfig.State.CREATING;
+ break;
+ case READY:
+ state = com.google.spanner.admin.instance.v1.InstanceConfig.State.READY;
+ break;
+ default:
+ throw new IllegalArgumentException("Unknown state:" + instanceConfig.getState());
+ }
+ instanceConfigBuilder.setState(state);
+ com.google.spanner.admin.instance.v1.InstanceConfig.Type type;
+ switch (instanceConfig.getConfigType()) {
+ case TYPE_UNSPECIFIED:
+ type = com.google.spanner.admin.instance.v1.InstanceConfig.Type.TYPE_UNSPECIFIED;
+ break;
+ case GOOGLE_MANAGED:
+ type = com.google.spanner.admin.instance.v1.InstanceConfig.Type.GOOGLE_MANAGED;
+ break;
+ case USER_MANAGED:
+ type = com.google.spanner.admin.instance.v1.InstanceConfig.Type.USER_MANAGED;
+ break;
+ default:
+ throw new IllegalArgumentException("Unknown type:" + instanceConfig.getConfigType());
+ }
+ instanceConfigBuilder.setConfigType(type);
+ if (instanceConfig.getBaseConfig() != null) {
+ instanceConfigBuilder.setBaseConfig(instanceConfig.getBaseConfig().getId().getName());
+ }
+ return instanceConfigBuilder.build();
+ }
+}
diff --git a/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutor.java b/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutor.java
new file mode 100644
index 00000000000..bf5f56aadca
--- /dev/null
+++ b/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutor.java
@@ -0,0 +1,476 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.executor.spanner;
+
+import com.google.api.client.http.javanet.NetHttpTransport;
+import com.google.auth.http.HttpTransportFactory;
+import com.google.cloud.executor.spanner.CloudClientExecutor.ExecutionFlowContext;
+import com.google.cloud.spanner.ErrorCode;
+import com.google.cloud.spanner.SpannerException;
+import com.google.cloud.spanner.SpannerExceptionFactory;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.Timestamp;
+import com.google.protobuf.util.Timestamps;
+import com.google.spanner.executor.v1.ChangeStreamRecord;
+import com.google.spanner.executor.v1.ChildPartitionsRecord;
+import com.google.spanner.executor.v1.ColumnMetadata;
+import com.google.spanner.executor.v1.QueryResult;
+import com.google.spanner.executor.v1.ReadResult;
+import com.google.spanner.executor.v1.SpannerActionOutcome;
+import com.google.spanner.executor.v1.SpannerAsyncActionResponse;
+import com.google.spanner.executor.v1.TableMetadata;
+import com.google.spanner.v1.StructType;
+import io.grpc.Status;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import java.util.regex.Pattern;
+
+/** Superclass of cloud Java Client implementations for cloud requests. */
+public abstract class CloudExecutor {
+
+ private static final Logger LOGGER = Logger.getLogger(CloudExecutor.class.getName());
+
+ // Pattern for a database name: projects/<project>/instances/<instance>/databases/<database>
+ protected static final Pattern DB_NAME =
+ Pattern.compile(
+ "projects/([A-Za-z0-9-_]+)/instances/([A-Za-z0-9-_]+)/databases/([A-Za-z0-9-_]+)");
+
+ // Project id.
+ protected static final String PROJECT_ID = "spanner-cloud-systest";
+
+ // Transport factory.
+ protected static final HttpTransportFactory HTTP_TRANSPORT_FACTORY = NetHttpTransport::new;
+
+ // Indicates whether the gRPC fault injector should be enabled.
+ protected boolean enableGrpcFaultInjector;
+
+ /**
+ * Metadata is used to hold and retrieve metadata of tables and columns involved in a transaction.
+ */
+ public static class Metadata {
+
+ private final Map<String, List<ColumnMetadata>> tableKeyColumnsInOrder;
+ private final Map<String, Map<String, ColumnMetadata>> tableColumnsByName;
+
+ /** Init metadata from list of tableMetadata in startTransaction action. */
+ public Metadata(List<TableMetadata> metadata) {
+ tableKeyColumnsInOrder = new HashMap<>();
+ tableColumnsByName = new HashMap<>();
+ for (TableMetadata table : metadata) {
+ String tableName = table.getName();
+ tableKeyColumnsInOrder.put(tableName, table.getKeyColumnList());
+ tableColumnsByName.put(tableName, new HashMap<>());
+ for (int j = 0; j < table.getColumnCount(); ++j) {
+ ColumnMetadata column = table.getColumn(j);
+ tableColumnsByName.get(tableName).put(column.getName(), column);
+ }
+ }
+ }
+
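+ /** Return the key column types of the given table, in key order. */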
+ public List<com.google.spanner.v1.Type> getKeyColumnTypes(String tableName)
+ throws SpannerException {
+ if (!tableKeyColumnsInOrder.containsKey(tableName)) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "There is no metadata for table: " + tableName);
+ }
+ List<com.google.spanner.v1.Type> typeList = new ArrayList<>();
+ List<ColumnMetadata> columns = tableKeyColumnsInOrder.get(tableName);
+ for (ColumnMetadata column : columns) {
+ typeList.add(column.getType());
+ }
+ return typeList;
+ }
+
+ /** Return column type of the given table and column. */
+ public com.google.spanner.v1.Type getColumnType(String tableName, String columnName)
+ throws SpannerException {
+ if (!tableColumnsByName.containsKey(tableName)) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "There is no metadata for table: " + tableName);
+ }
+ Map<String, ColumnMetadata> columnList = tableColumnsByName.get(tableName);
+ if (!columnList.containsKey(columnName)) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT,
+ "Metadata for table " + tableName + " contains no column named " + columnName);
+ }
+ return columnList.get(columnName).getType();
+ }
+ }
+
+ /**
+ * OutcomeSender is a utility class used for sending action outcomes back to the client. For read
+ * actions, it buffers rows and sends partial read results in batches.
+ */
+ public class OutcomeSender {
+
+ private final int actionId;
+ private final ExecutionFlowContext context;
+
+ // All the relevant variables below should be set before the first outcome is sent back,
+ // and unused variables should be left null.
+ private Timestamp timestamp;
+ private boolean hasReadResult;
+ private boolean hasQueryResult;
+ private boolean hasChangeStreamRecords;
+ private String table;
+ private String index;
+ private Integer requestIndex;
+ private StructType rowType;
+
+ // PartialOutcome accumulates rows and relevant information.
+ private SpannerActionOutcome.Builder partialOutcomeBuilder;
+ private ReadResult.Builder readResultBuilder;
+ private QueryResult.Builder queryResultBuilder;
+
+ // Current row count in Read/Query result.
+ private int rowCount;
+ // Modified row count in DML result.
+ private final List<Long> rowsModified = new ArrayList<>();
+ // Current ChangeStreamRecord count in Cloud result.
+ private int changeStreamRecordCount;
+ // Change stream records to be returned.
+ private final List<ChangeStreamRecord> changeStreamRecords = new ArrayList<>();
+ // Change stream related variables.
+ private String partitionTokensString = "[";
+ private String dataChangeRecordsString = "[";
+ private String changeStreamForQuery = "";
+ private String partitionTokenForQuery = "";
+
+ // The timestamp in milliseconds at which the last ChangeStreamRecord was received.
+ private long changeStreamRecordReceivedTimestamp;
+ // The heartbeat interval for the change stream query in milliseconds.
+ private long changeStreamHeartbeatMilliseconds;
+ // Whether the change stream query is a partitioned change stream query.
+ private boolean isPartitionedChangeStreamQuery;
+
+ // If the row count exceeds this value, buffered rows are sent back in a batch.
+ private static final int MAX_ROWS_PER_BATCH = 100;
+ // If the change stream record count exceeds this value, records are sent back in a batch.
+ private static final int MAX_CHANGE_STREAM_RECORDS_PER_BATCH = 2000;
+
+ public OutcomeSender(int actionId, ExecutionFlowContext context) {
+ this.actionId = actionId;
+ this.context = context;
+ this.index = null;
+ this.rowType = null;
+ this.requestIndex = null;
+ this.timestamp = Timestamp.newBuilder().setSeconds(0).setNanos(0).build();
+ }
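+ // A typical read flow, sketched here for illustration (table and index names are placeholders):
+ //   OutcomeSender sender = new OutcomeSender(actionId, context);
+ //   sender.initForRead("Singers", "SingersByName");
+ //   sender.setRowType(rowType);
+ //   // appendRow(...) for each row; rows are flushed every MAX_ROWS_PER_BATCH rows
+ //   sender.finishWithOK();  // flushes the remaining buffered rows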
+
+ /** Set the timestamp for commit. */
+ public void setTimestamp(Timestamp timestamp) {
+ this.timestamp = timestamp;
+ }
+
+ /** Set the rowType for appending row. */
+ public void setRowType(StructType rowType) {
+ this.rowType = rowType;
+ }
+
+ /** Init the sender for a read action, setting the table and, if present, the index. */
+ public void initForRead(String table, String index) {
+ this.hasReadResult = true;
+ this.table = table;
+ if (!index.isEmpty()) {
+ this.index = index;
+ }
+ }
+
+ /** Init the sender for query action. */
+ public void initForQuery() {
+ this.hasQueryResult = true;
+ }
+
+ /** Init the sender for a batch read action, setting the table and, if present, the index. */
+ public void initForBatchRead(String table, String index) {
+ initForRead(table, index);
+ // Cloud API supports only simple batch reads (not multi reads), so request index
+ // is always 0
+ this.requestIndex = 0;
+ }
+
+ /** Init the sender for change stream query action. */
+ public void initForChangeStreamQuery(
+ long changeStreamHeartbeatMilliseconds, String changeStreamName, String partitionToken) {
+ this.hasChangeStreamRecords = true;
+ this.changeStreamRecordReceivedTimestamp = 0;
+ this.changeStreamHeartbeatMilliseconds = changeStreamHeartbeatMilliseconds;
+ this.changeStreamForQuery = changeStreamName;
+ if (!partitionToken.isEmpty()) {
+ this.isPartitionedChangeStreamQuery = true;
+ this.partitionTokenForQuery = partitionToken;
+ }
+ }
+
+ /** Update change stream record timestamp. */
+ public void updateChangeStreamRecordReceivedTimestamp(
+ long changeStreamRecordReceivedTimestamp) {
+ this.changeStreamRecordReceivedTimestamp = changeStreamRecordReceivedTimestamp;
+ }
+
+ /** Add rows modified in DML to result. */
+ public void appendRowsModifiedInDml(Long rowsModified) {
+ this.rowsModified.add(rowsModified);
+ }
+
+ public long getChangeStreamRecordReceivedTimestamp() {
+ return this.changeStreamRecordReceivedTimestamp;
+ }
+
+ public long getChangeStreamHeartbeatMilliSeconds() {
+ return this.changeStreamHeartbeatMilliseconds;
+ }
+
+ public boolean getIsPartitionedChangeStreamQuery() {
+ return this.isPartitionedChangeStreamQuery;
+ }
+
+ /** Send the last outcome with OK status. */
+ public Status finishWithOK() {
+ buildOutcome();
+ partialOutcomeBuilder.setStatus(toProto(Status.OK));
+ return flush();
+ }
+
+ /** Send the last outcome after the transaction was aborted; this sets transactionRestarted to true. */
+ public Status finishWithTransactionRestarted() {
+ buildOutcome();
+ partialOutcomeBuilder.setTransactionRestarted(true);
+ partialOutcomeBuilder.setStatus(toProto(Status.OK));
+ return flush();
+ }
+
+ /** Send the last outcome with given error status. */
+ public Status finishWithError(Status err) {
+ buildOutcome();
+ partialOutcomeBuilder.setStatus(toProto(err));
+ return flush();
+ }
+
+ /**
+ * Add another row to the buffer. If the buffer hits its size limit, the buffered rows are sent back.
+ */
+ public Status appendRow(com.google.spanner.executor.v1.ValueList row) {
+ if (!hasReadResult && !hasQueryResult) {
+ return toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT,
+ "Either hasReadResult or hasQueryResult should be true"));
+ }
+ if (rowType == null) {
+ return toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "RowType should be set first"));
+ }
+ buildOutcome();
+ if (hasReadResult) {
+ readResultBuilder.addRow(row);
+ ++rowCount;
+ } else if (hasQueryResult) {
+ queryResultBuilder.addRow(row);
+ ++rowCount;
+ }
+ if (rowCount >= MAX_ROWS_PER_BATCH) {
+ return flush();
+ }
+ return Status.OK;
+ }
+
+ /** Append change stream record to result. */
+ public Status appendChangeStreamRecord(ChangeStreamRecord record) {
+ if (!hasChangeStreamRecords) {
+ return toStatus(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "hasChangeStreamRecords should be true"));
+ }
+ if (record.hasDataChange()) {
+ String appendedString =
+ String.format(
+ "{%s, %s}, ",
+ record.getDataChange().getTransactionId(),
+ record.getDataChange().getRecordSequence());
+ dataChangeRecordsString += appendedString;
+ } else if (record.hasChildPartition()) {
+ for (ChildPartitionsRecord.ChildPartition childPartition :
+ record.getChildPartition().getChildPartitionsList()) {
+ partitionTokensString = partitionTokensString.concat(childPartition.getToken() + ", ");
+ }
+ }
+ buildOutcome();
+ changeStreamRecords.add(record);
+ ++changeStreamRecordCount;
+ if (changeStreamRecordCount >= MAX_CHANGE_STREAM_RECORDS_PER_BATCH) {
+ return flush();
+ }
+ return Status.OK;
+ }
+
+ /** Build the partialOutcome, if it does not yet exist, from the relevant variables. */
+ private void buildOutcome() {
+ if (partialOutcomeBuilder != null) {
+ return;
+ }
+ partialOutcomeBuilder = SpannerActionOutcome.newBuilder();
+ partialOutcomeBuilder.setCommitTime(timestamp);
+ if (hasReadResult) {
+ readResultBuilder = ReadResult.newBuilder();
+ readResultBuilder.setTable(table);
+ if (index != null) {
+ readResultBuilder.setIndex(index);
+ }
+ if (rowType != null) {
+ readResultBuilder.setRowType(rowType);
+ }
+ if (requestIndex != null) {
+ readResultBuilder.setRequestIndex(requestIndex);
+ }
+ } else if (hasQueryResult) {
+ queryResultBuilder = QueryResult.newBuilder();
+ if (rowType != null) {
+ queryResultBuilder.setRowType(rowType);
+ }
+ }
+ }
+
+ /** Send partialOutcome to stream and clear the internal state. */
+ private Status flush() {
+ Preconditions.checkNotNull(partialOutcomeBuilder);
+ for (Long rowCount : rowsModified) {
+ partialOutcomeBuilder.addDmlRowsModified(rowCount);
+ }
+ if (hasReadResult) {
+ partialOutcomeBuilder.setReadResult(readResultBuilder.build());
+ } else if (hasQueryResult) {
+ partialOutcomeBuilder.setQueryResult(queryResultBuilder.build());
+ } else if (hasChangeStreamRecords) {
+ partialOutcomeBuilder.addAllChangeStreamRecords(changeStreamRecords);
+ partitionTokensString += "]\n";
+ dataChangeRecordsString += "]\n";
+ LOGGER.log(
+ Level.INFO,
+ String.format(
+ "OutcomeSender with action ID %s for change stream %s and partition token %s is "
+ + "sending data change records with the following transaction id/record sequence "
+ + "combinations: %s and partition tokens: %s",
+ actionId,
+ this.changeStreamForQuery,
+ this.partitionTokenForQuery,
+ dataChangeRecordsString,
+ partitionTokensString));
+ partitionTokensString = "";
+ dataChangeRecordsString = "";
+ }
+ Status status = sendOutcome(partialOutcomeBuilder.build());
+ partialOutcomeBuilder = null;
+ readResultBuilder = null;
+ queryResultBuilder = null;
+ rowCount = 0;
+ rowsModified.clear();
+ changeStreamRecordCount = 0;
+ changeStreamRecords.clear();
+ return status;
+ }
+
+ /** Send the given SpannerActionOutcome. */
+ public Status sendOutcome(SpannerActionOutcome outcome) {
+ try {
+ LOGGER.log(Level.INFO, String.format("Sending result %s actionId %s", outcome, actionId));
+ SpannerAsyncActionResponse result =
+ SpannerAsyncActionResponse.newBuilder()
+ .setActionId(actionId)
+ .setOutcome(outcome)
+ .build();
+ context.onNext(result);
+ LOGGER.log(Level.INFO, String.format("Sent result %s actionId %s", outcome, actionId));
+ } catch (SpannerException e) {
+ LOGGER.log(Level.SEVERE, "Failed to send outcome with error: " + e.getMessage(), e);
+ return toStatus(e);
+ } catch (Throwable t) {
+ LOGGER.log(Level.SEVERE, "Failed to send outcome with error: " + t.getMessage(), t);
+ return Status.fromThrowable(
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, "Unexpected error during rpc send: " + t));
+ }
+ return Status.OK;
+ }
+ }
+
+ /** Map Cloud ErrorCode to Status. */
+ protected Status toStatus(SpannerException e) {
+ switch (e.getErrorCode()) {
+ case INVALID_ARGUMENT:
+ return Status.fromCode(Status.INVALID_ARGUMENT.getCode()).withDescription(e.getMessage());
+ case PERMISSION_DENIED:
+ return Status.fromCode(Status.PERMISSION_DENIED.getCode()).withDescription(e.getMessage());
+ case ABORTED:
+ return Status.fromCode(Status.ABORTED.getCode()).withDescription(e.getMessage());
+ case ALREADY_EXISTS:
+ return Status.fromCode(Status.ALREADY_EXISTS.getCode()).withDescription(e.getMessage());
+ case CANCELLED:
+ return Status.fromCode(Status.CANCELLED.getCode()).withDescription(e.getMessage());
+ case INTERNAL:
+ return Status.fromCode(Status.INTERNAL.getCode()).withDescription(e.getMessage());
+ case FAILED_PRECONDITION:
+ return Status.fromCode(Status.FAILED_PRECONDITION.getCode())
+ .withDescription(e.getMessage());
+ case NOT_FOUND:
+ return Status.fromCode(Status.NOT_FOUND.getCode()).withDescription(e.getMessage());
+ case DEADLINE_EXCEEDED:
+ return Status.fromCode(Status.DEADLINE_EXCEEDED.getCode()).withDescription(e.getMessage());
+ case RESOURCE_EXHAUSTED:
+ return Status.fromCode(Status.RESOURCE_EXHAUSTED.getCode()).withDescription(e.getMessage());
+ case OUT_OF_RANGE:
+ return Status.fromCode(Status.OUT_OF_RANGE.getCode()).withDescription(e.getMessage());
+ case UNAUTHENTICATED:
+ return Status.fromCode(Status.UNAUTHENTICATED.getCode()).withDescription(e.getMessage());
+ case UNIMPLEMENTED:
+ return Status.fromCode(Status.UNIMPLEMENTED.getCode()).withDescription(e.getMessage());
+ case UNAVAILABLE:
+ return Status.fromCode(Status.UNAVAILABLE.getCode()).withDescription(e.getMessage());
+ case UNKNOWN:
+ return Status.fromCode(Status.UNKNOWN.getCode()).withDescription(e.getMessage());
+ default:
+ return Status.fromCode(Status.UNKNOWN.getCode())
+ .withDescription("Unsupported Spanner error code: " + e.getErrorCode());
+ }
+ }
+
+ /** Convert a Status to a Status Proto. */
+ protected static com.google.rpc.Status toProto(Status status) {
+ return com.google.rpc.Status.newBuilder()
+ .setCode(status.getCode().value())
+ .setMessage(status.getDescription() == null ? "" : status.getDescription())
+ .build();
+ }
+
+ /**
+ * Converts timestamp microseconds to a query-friendly timestamp string. If useNanosPrecision is
+ * true, the input is padded with three pseudo-random digits (from System.nanoTime()) and treated
+ * as nanoseconds.
+ */
+ protected static String timestampToString(boolean useNanosPrecision, long timestampInMicros) {
+ Timestamp timestamp =
+ useNanosPrecision
+ ? Timestamps.fromNanos(timestampInMicros * 1000 + System.nanoTime() % 1000)
+ : Timestamps.fromMicros(timestampInMicros);
+ return String.format("\"%s\"", Timestamps.toString(timestamp));
+ }
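+ // For example, timestampToString(false, 1_000_000L) returns the RFC 3339 form wrapped in double
+ // quotes, "\"1970-01-01T00:00:01Z\"", which can be embedded directly in a query string.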
+}
diff --git a/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutorImpl.java b/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutorImpl.java
new file mode 100644
index 00000000000..548392b5c84
--- /dev/null
+++ b/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudExecutorImpl.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.executor.spanner;
+
+import com.google.cloud.spanner.ErrorCode;
+import com.google.cloud.spanner.SpannerExceptionFactory;
+import com.google.spanner.executor.v1.SpannerAsyncActionRequest;
+import com.google.spanner.executor.v1.SpannerAsyncActionResponse;
+import com.google.spanner.executor.v1.SpannerExecutorProxyGrpc;
+import io.grpc.Status;
+import io.grpc.stub.StreamObserver;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/** Send proxied action requests through Spanner Cloud API. */
+public class CloudExecutorImpl extends SpannerExecutorProxyGrpc.SpannerExecutorProxyImplBase {
+
+ private static final Logger LOGGER = Logger.getLogger(CloudExecutorImpl.class.getName());
+
+ // Executors to proxy.
+ private final CloudClientExecutor clientExecutor;
+
+ public CloudExecutorImpl(boolean enableGrpcFaultInjector) {
+ clientExecutor = new CloudClientExecutor(enableGrpcFaultInjector);
+ }
+
+ /** Execute SpannerAsync action requests. */
+ @Override
+ public StreamObserver<SpannerAsyncActionRequest> executeActionAsync(
+ StreamObserver<SpannerAsyncActionResponse> responseObserver) {
+ CloudClientExecutor.ExecutionFlowContext executionContext =
+ clientExecutor.new ExecutionFlowContext(responseObserver);
+ return new StreamObserver<SpannerAsyncActionRequest>() {
+ @Override
+ public void onNext(SpannerAsyncActionRequest request) {
+ LOGGER.log(Level.INFO, String.format("Receiving request: \n%s", request));
+ Status status = clientExecutor.startHandlingRequest(request, executionContext);
+ if (!status.isOk()) {
+ LOGGER.log(
+ Level.WARNING,
+ "Failed to handle request, half closed",
+ SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT, status.getDescription()));
+ }
+ }
+
+ @Override
+ public void onError(Throwable t) {
+ LOGGER.log(Level.WARNING, "Client ends the stream with error.", t);
+ executionContext.cleanup();
+ }
+
+ @Override
+ public void onCompleted() {
+ LOGGER.log(Level.INFO, "Client called Done, half closed");
+ executionContext.cleanup();
+ responseObserver.onCompleted();
+ }
+ };
+ }
+}
diff --git a/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudUtil.java b/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudUtil.java
new file mode 100644
index 00000000000..e58d124285c
--- /dev/null
+++ b/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/CloudUtil.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.executor.spanner;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import com.google.api.gax.grpc.GrpcTransportChannel;
+import com.google.api.gax.rpc.FixedTransportChannelProvider;
+import com.google.api.gax.rpc.TransportChannel;
+import com.google.api.gax.rpc.TransportChannelProvider;
+import com.google.common.net.HostAndPort;
+import io.grpc.ManagedChannelBuilder;
+import io.grpc.netty.shaded.io.grpc.netty.GrpcSslContexts;
+import io.grpc.netty.shaded.io.grpc.netty.InternalNettyChannelBuilder;
+import io.grpc.netty.shaded.io.grpc.netty.NegotiationType;
+import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder;
+import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+public class CloudUtil {
+
+ // If this is set too low, the peer server may return RESOURCE_EXHAUSTED errors if the response
+ // error message causes the trailing headers to exceed this limit.
+ private static final int GRPC_MAX_HEADER_LIST_SIZE_BYTES = 10 * 1024 * 1024;
+
+ private static final String TEST_HOST_IN_CERT = "test_cert_2";
+
+ public static TransportChannelProvider newChannelProviderHelper(int port) {
+ NettyChannelBuilder builder =
+ (NettyChannelBuilder)
+ getChannelBuilderForTestGFE("localhost", port, WorkerProxy.cert, TEST_HOST_IN_CERT)
+ .maxInboundMessageSize(100 * 1024 * 1024 /* 100 MB */);
+ if (WorkerProxy.usePlainTextChannel) {
+ builder.usePlaintext();
+ }
+ TransportChannel channel =
+ GrpcTransportChannel.newBuilder()
+ .setManagedChannel(
+ builder.maxInboundMetadataSize(GRPC_MAX_HEADER_LIST_SIZE_BYTES).build())
+ .build();
+ return FixedTransportChannelProvider.create(channel);
+ }
+
+ public static ManagedChannelBuilder<?> getChannelBuilderForTestGFE(
+ String host, int sslPort, String certPath, String hostInCert) {
+ SslContext sslContext;
+ try {
+ sslContext =
+ GrpcSslContexts.forClient()
+ .trustManager(CertUtil.copyCert(certPath))
+ .ciphers(null)
+ .build();
+ } catch (IOException ex) {
+ throw new RuntimeException(ex);
+ }
+
+ HostAndPort hostPort = HostAndPort.fromParts(host, sslPort);
+ String target;
+ try {
+ target = new URI("dns", "", "/" + hostPort, null).toString();
+ } catch (URISyntaxException ex) {
+ throw new RuntimeException(ex);
+ }
+ try {
+ NettyChannelBuilder channelBuilder = NettyChannelBuilder.forTarget(target);
+ InternalNettyChannelBuilder.disableCheckAuthority(channelBuilder);
+
+ return channelBuilder
+ .overrideAuthority(hostInCert)
+ .sslContext(sslContext)
+ .negotiationType(NegotiationType.TLS);
+ } catch (Throwable t) {
+ throw new RuntimeException(t);
+ }
+ }
+
+ static final class CertUtil {
+ private CertUtil() {
+ // prevent instantiation
+ }
+
+ /** Copies cert resource to file, stripping out PEM comments. */
+ public static File copyCert(String certFileName) throws IOException {
+ File certFile = new File(certFileName);
+ File file = File.createTempFile("CAcert", "pem");
+ file.deleteOnExit();
+ try (BufferedReader in =
+ new BufferedReader(new InputStreamReader(new FileInputStream(certFile), UTF_8));
+ Writer out = new OutputStreamWriter(new FileOutputStream(file), UTF_8)) {
+ String line;
+ do {
+ while ((line = in.readLine()) != null) {
+ if ("-----BEGIN CERTIFICATE-----".equals(line)) {
+ break;
+ }
+ }
+ out.append(line);
+ out.append("\n");
+ while ((line = in.readLine()) != null) {
+ out.append(line);
+ out.append("\n");
+ if ("-----END CERTIFICATE-----".equals(line)) {
+ break;
+ }
+ }
+ } while (line != null);
+ }
+ return file;
+ }
+ }
+}
diff --git a/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/WorkerProxy.java b/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/WorkerProxy.java
new file mode 100644
index 00000000000..08809d45887
--- /dev/null
+++ b/google-cloud-spanner-executor/src/main/java/com/google/cloud/executor/spanner/WorkerProxy.java
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.executor.spanner;
+
+import com.google.cloud.spanner.ErrorCode;
+import com.google.cloud.spanner.SpannerExceptionFactory;
+import io.grpc.Server;
+import io.grpc.ServerBuilder;
+import io.grpc.protobuf.services.HealthStatusManager;
+import io.grpc.protobuf.services.ProtoReflectionService;
+import java.io.IOException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+
+/**
+ * Worker proxy for the Java API. This is the main entry point of the Java client proxy for the
+ * cloud Spanner Java client.
+ */
+public class WorkerProxy {
+
+ private static final Logger LOGGER = Logger.getLogger(WorkerProxy.class.getName());
+
+ private static final String OPTION_SPANNER_PORT = "spanner_port";
+ private static final String OPTION_PROXY_PORT = "proxy_port";
+ private static final String OPTION_CERTIFICATE = "cert";
+ private static final String OPTION_SERVICE_KEY_FILE = "service_key_file";
+ private static final String OPTION_USE_PLAIN_TEXT_CHANNEL = "use_plain_text_channel";
+ private static final String OPTION_ENABLE_GRPC_FAULT_INJECTOR = "enable_grpc_fault_injector";
+
+ public static int spannerPort = 0;
+ public static int proxyPort = 0;
+ public static String cert = "";
+ public static String serviceKeyFile = "";
+ public static boolean usePlainTextChannel = false;
+ public static boolean enableGrpcFaultInjector = false;
+
+ public static CommandLine commandLine;
+
+ private static final int MIN_PORT = 0, MAX_PORT = 65535;
+
+ public static void main(String[] args) throws Exception {
+ commandLine = buildOptions(args);
+
+ if (!commandLine.hasOption(OPTION_SPANNER_PORT)) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT,
+ "Spanner proxyPort need to be assigned in order to start worker proxy.");
+ }
+ spannerPort = Integer.parseInt(commandLine.getOptionValue(OPTION_SPANNER_PORT));
+ if (spannerPort < MIN_PORT || spannerPort > MAX_PORT) {
+ throw new IllegalArgumentException(
+ "Spanner proxyPort must be between " + MIN_PORT + " and " + MAX_PORT);
+ }
+
+ if (!commandLine.hasOption(OPTION_PROXY_PORT)) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT,
+ "Proxy port need to be assigned in order to start worker proxy.");
+ }
+ proxyPort = Integer.parseInt(commandLine.getOptionValue(OPTION_PROXY_PORT));
+ if (proxyPort < MIN_PORT || proxyPort > MAX_PORT) {
+ throw new IllegalArgumentException(
+ "Proxy port must be between " + MIN_PORT + " and " + MAX_PORT);
+ }
+
+ if (!commandLine.hasOption(OPTION_CERTIFICATE)) {
+ throw SpannerExceptionFactory.newSpannerException(
+ ErrorCode.INVALID_ARGUMENT,
+ "Certificate need to be assigned in order to start worker proxy.");
+ }
+ cert = commandLine.getOptionValue(OPTION_CERTIFICATE);
+ if (commandLine.hasOption(OPTION_SERVICE_KEY_FILE)) {
+ serviceKeyFile = commandLine.getOptionValue(OPTION_SERVICE_KEY_FILE);
+ }
+
+ usePlainTextChannel = commandLine.hasOption(OPTION_USE_PLAIN_TEXT_CHANNEL);
+ enableGrpcFaultInjector = commandLine.hasOption(OPTION_ENABLE_GRPC_FAULT_INJECTOR);
+
+ Server server;
+ while (true) {
+ try {
+ CloudExecutorImpl cloudExecutorImpl = new CloudExecutorImpl(enableGrpcFaultInjector);
+ HealthStatusManager healthStatusManager = new HealthStatusManager();
+ // Set up Cloud server.
+ server =
+ ServerBuilder.forPort(proxyPort)
+ .addService(cloudExecutorImpl)
+ .addService(ProtoReflectionService.newInstance())
+ .addService(healthStatusManager.getHealthService())
+ .build();
+ server.start();
+ LOGGER.log(Level.INFO, String.format("Server started on proxyPort: %d", proxyPort));
+ } catch (IOException e) {
+ LOGGER.log(
+ Level.WARNING, String.format("Failed to start server on proxyPort %d", proxyPort), e);
+ continue; // Binding failed; retry on the same proxyPort.
+ }
+ break;
+ }
+ server.awaitTermination();
+ }
+
+ private static CommandLine buildOptions(String[] args) {
+ Options options = new Options();
+
+ options.addOption(
+ null, OPTION_SPANNER_PORT, true, "Port of Spanner Frontend to which to send requests.");
+ options.addOption(null, OPTION_PROXY_PORT, true, "Proxy port to start worker proxy on.");
+ options.addOption(
+ null, OPTION_CERTIFICATE, true, "Certificate used to connect to Spanner GFE.");
+ options.addOption(
+ null, OPTION_SERVICE_KEY_FILE, true, "Service key file used to set authentication.");
+ options.addOption(
+ null,
+ OPTION_USE_PLAIN_TEXT_CHANNEL,
+ false,
+ "Use a plain text gRPC channel (intended for the Cloud Spanner Emulator).");
+ options.addOption(
+ null,
+ OPTION_ENABLE_GRPC_FAULT_INJECTOR,
+ false,
+ "Enable grpc fault injector in cloud client executor.");
+
+ CommandLineParser parser = new DefaultParser();
+ try {
+ return parser.parse(options, args);
+ } catch (ParseException e) {
+ throw new IllegalArgumentException(e.getMessage());
+ }
+ }
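+ // For illustration, a local run against the emulator might look roughly like the following,
+ // where the jar name, paths and ports are placeholders:
+ //   java -jar google-cloud-spanner-executor-jar-with-dependencies.jar \
+ //       --proxy_port=8081 --spanner_port=9010 --cert=/path/to/cert.pem --use_plain_text_channel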
+}
diff --git a/google-cloud-spanner-executor/src/main/resources/META-INF/services/io.grpc.LoadBalancerProvider b/google-cloud-spanner-executor/src/main/resources/META-INF/services/io.grpc.LoadBalancerProvider
new file mode 100644
index 00000000000..bbc367f8fc5
--- /dev/null
+++ b/google-cloud-spanner-executor/src/main/resources/META-INF/services/io.grpc.LoadBalancerProvider
@@ -0,0 +1 @@
+io.grpc.internal.PickFirstLoadBalancerProvider
diff --git a/license_file b/license_file
new file mode 100644
index 00000000000..eeb41531251
--- /dev/null
+++ b/license_file
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
\ No newline at end of file
diff --git a/pom.xml b/pom.xml
index f109b02e046..75e4555dc60 100644
--- a/pom.xml
+++ b/pom.xml
@@ -132,6 +132,7 @@
proto-google-cloud-spanner-admin-instance-v1
proto-google-cloud-spanner-v1
proto-google-cloud-spanner-admin-database-v1
+ google-cloud-spanner-executor
google-cloud-spanner-bom
diff --git a/versions.txt b/versions.txt
index 75f52203d96..9727918dc52 100644
--- a/versions.txt
+++ b/versions.txt
@@ -8,3 +8,4 @@ grpc-google-cloud-spanner-v1:6.34.1:6.34.2-SNAPSHOT
grpc-google-cloud-spanner-admin-instance-v1:6.34.1:6.34.2-SNAPSHOT
grpc-google-cloud-spanner-admin-database-v1:6.34.1:6.34.2-SNAPSHOT
google-cloud-spanner:6.34.1:6.34.2-SNAPSHOT
+google-cloud-spanner-executor:6.34.1:6.34.2-SNAPSHOT