fix: always allow metadata queries (#2580)
* fix: always allow metadata queries

Internal metadata queries should always be allowed, regardless of the type
of transaction that is currently running or any special mode that has been
set on the connection. Internal metadata queries are system queries that a
connection generates to return metadata to an application, for example when
a JDBC client calls methods like getTables().

* 🦉 Updates from OwlBot post-processor

See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

* chore: run code formatter

* chore: make if condition more readable

Co-authored-by: Rajat Bhatta <[email protected]>

---------

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
Co-authored-by: Rajat Bhatta <[email protected]>
3 people authored Aug 11, 2023
1 parent 850435b commit ebb17fc
Showing 6 changed files with 205 additions and 72 deletions.
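For context on the scenario described in the commit message above: a driver-generated metadata lookup such as JDBC's getTables() is the kind of call this change keeps working. A minimal sketch, assuming the Cloud Spanner JDBC driver is on the classpath and using placeholder project, instance, and database names:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class ListTablesSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder URI; substitute a real project, instance, and database.
    String url =
        "jdbc:cloudspanner:/projects/my-project/instances/my-instance/databases/my-database";
    try (Connection connection = DriverManager.getConnection(url)) {
      // getTables() causes the driver to run an internal metadata query against
      // INFORMATION_SCHEMA; with this fix it succeeds regardless of the
      // transaction type or batch mode currently active on the connection.
      try (ResultSet tables =
          connection.getMetaData().getTables(null, null, null, new String[] {"TABLE"})) {
        while (tables.next()) {
          System.out.println(tables.getString("TABLE_NAME"));
        }
      }
    }
  }
}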
@@ -133,17 +133,6 @@ enum BatchMode {
DML
}

/**
* This query option is used internally to indicate that a query is executed by the library itself
* to fetch metadata. These queries are specifically allowed to be executed even when a DDL batch
* is active.
*/
static final class InternalMetadataQuery implements QueryOption {
static final InternalMetadataQuery INSTANCE = new InternalMetadataQuery();

private InternalMetadataQuery() {}
}

/** The combination of all transaction modes and batch modes. */
enum UnitOfWorkType {
READ_ONLY_TRANSACTION {
@@ -1219,6 +1208,18 @@ private AsyncResultSet parseAndExecuteQueryAsync(
+ parsedStatement.getSqlWithoutComments());
}

private boolean isInternalMetadataQuery(QueryOption... options) {
if (options == null) {
return false;
}
for (QueryOption option : options) {
if (option instanceof InternalMetadataQuery) {
return true;
}
}
return false;
}

@Override
public long executeUpdate(Statement update) {
Preconditions.checkNotNull(update);
@@ -1450,8 +1451,11 @@ private ResultSet internalExecuteQuery(
|| (statement.getType() == StatementType.UPDATE
&& (analyzeMode != AnalyzeMode.NONE || statement.hasReturningClause())),
"Statement must either be a query or a DML mode with analyzeMode!=NONE or returning clause");
UnitOfWork transaction = getCurrentUnitOfWorkOrStartNewUnitOfWork();
if (autoPartitionMode && statement.getType() == StatementType.QUERY) {
boolean isInternalMetadataQuery = isInternalMetadataQuery(options);
UnitOfWork transaction = getCurrentUnitOfWorkOrStartNewUnitOfWork(isInternalMetadataQuery);
if (autoPartitionMode
&& statement.getType() == StatementType.QUERY
&& !isInternalMetadataQuery) {
return runPartitionedQuery(
statement.getStatement(), PartitionOptions.getDefaultInstance(), options);
}
@@ -1475,7 +1479,8 @@ private AsyncResultSet internalExecuteQueryAsync(
ConnectionPreconditions.checkState(
!(autoPartitionMode && statement.getType() == StatementType.QUERY),
"Partitioned queries cannot be executed asynchronously");
UnitOfWork transaction = getCurrentUnitOfWorkOrStartNewUnitOfWork();
UnitOfWork transaction =
getCurrentUnitOfWorkOrStartNewUnitOfWork(isInternalMetadataQuery(options));
return ResultSets.toAsyncResultSet(
transaction.executeQueryAsync(
callType,
@@ -1514,22 +1519,31 @@ private ApiFuture<long[]> internalExecuteBatchUpdateAsync(
callType, updates, mergeUpdateRequestOptions(mergeUpdateStatementTag(options)));
}

private UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork() {
return getCurrentUnitOfWorkOrStartNewUnitOfWork(false);
}

/**
* Returns the current {@link UnitOfWork} of this connection, or creates a new one based on the
* current transaction settings of the connection and returns that.
*/
@VisibleForTesting
UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork() {
UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork(boolean isInternalMetadataQuery) {
if (isInternalMetadataQuery) {
// Just return a temporary single-use transaction.
return createNewUnitOfWork(true);
}
if (this.currentUnitOfWork == null || !this.currentUnitOfWork.isActive()) {
this.currentUnitOfWork = createNewUnitOfWork();
this.currentUnitOfWork = createNewUnitOfWork(false);
}
return this.currentUnitOfWork;
}

@VisibleForTesting
UnitOfWork createNewUnitOfWork() {
if (isAutocommit() && !isInTransaction() && !isInBatch()) {
UnitOfWork createNewUnitOfWork(boolean isInternalMetadataQuery) {
if (isInternalMetadataQuery || (isAutocommit() && !isInTransaction() && !isInBatch())) {
return SingleUseTransaction.newBuilder()
.setInternalMetadataQuery(isInternalMetadataQuery)
.setDdlClient(ddlClient)
.setDatabaseClient(dbClient)
.setBatchClient(batchClient)
@@ -1660,7 +1674,7 @@ public void startBatchDdl() {
!transactionBeginMarked, "Cannot start a DDL batch when a transaction has begun");
this.batchMode = BatchMode.DDL;
this.unitOfWorkType = UnitOfWorkType.DDL_BATCH;
this.currentUnitOfWork = createNewUnitOfWork();
this.currentUnitOfWork = createNewUnitOfWork(false);
}

@Override
@@ -1678,7 +1692,7 @@ public void startBatchDml() {
// Then create the DML batch.
this.batchMode = BatchMode.DML;
this.unitOfWorkType = UnitOfWorkType.DML_BATCH;
this.currentUnitOfWork = createNewUnitOfWork();
this.currentUnitOfWork = createNewUnitOfWork(false);
}

@Override
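As a side note on the pattern used in the hunks above: library-internal callers tag their queries with a marker QueryOption, and the connection scans the option varargs for it. A self-contained sketch of that idea (the class and method names mirror the diff but stand alone here; they are not imports from the library):

import java.util.Arrays;

public class MarkerOptionSketch {
  // Stand-in for the library's QueryOption interface.
  interface QueryOption {}

  // Marker option used by internal code to tag metadata queries,
  // mirroring the InternalMetadataQuery class shown in the diff above.
  static final class InternalMetadataQuery implements QueryOption {
    static final InternalMetadataQuery INSTANCE = new InternalMetadataQuery();

    private InternalMetadataQuery() {}
  }

  // Same logic as the isInternalMetadataQuery helper added in this commit.
  static boolean isInternalMetadataQuery(QueryOption... options) {
    return options != null
        && Arrays.stream(options).anyMatch(option -> option instanceof InternalMetadataQuery);
  }

  public static void main(String[] args) {
    System.out.println(isInternalMetadataQuery());                               // false
    System.out.println(isInternalMetadataQuery(InternalMetadataQuery.INSTANCE)); // true
  }
}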
@@ -33,15 +33,12 @@
import com.google.cloud.spanner.SpannerExceptionFactory;
import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement;
import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType;
import com.google.cloud.spanner.connection.Connection.InternalMetadataQuery;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.spanner.admin.database.v1.DatabaseAdminGrpc;
import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata;
import com.google.spanner.v1.SpannerGrpc;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.Callable;

@@ -121,29 +118,6 @@ public ApiFuture<ResultSet> executeQueryAsync(
final ParsedStatement statement,
AnalyzeMode analyzeMode,
QueryOption... options) {
if (options != null) {
for (int i = 0; i < options.length; i++) {
if (options[i] instanceof InternalMetadataQuery) {
Preconditions.checkNotNull(statement);
Preconditions.checkArgument(statement.isQuery(), "Statement is not a query");
Preconditions.checkArgument(
analyzeMode == AnalyzeMode.NONE, "Analyze is not allowed for DDL batch");
// Queries marked with internal metadata queries are allowed during a DDL batch.
// These can only be generated by library internal methods and may be used to check
// whether a database object such as table or an index exists.
List<QueryOption> temp = new ArrayList<>();
Collections.addAll(temp, options);
temp.remove(i);
final QueryOption[] internalOptions = temp.toArray(new QueryOption[0]);
Callable<ResultSet> callable =
() ->
DirectExecuteResultSet.ofResultSet(
dbClient.singleUse().executeQuery(statement.getStatement(), internalOptions));
return executeStatementAsync(
callType, statement, callable, SpannerGrpc.getExecuteStreamingSqlMethod());
}
}
}
// Queries are by default not allowed on DDL batches.
throw SpannerExceptionFactory.newSpannerException(
ErrorCode.FAILED_PRECONDITION, "Executing queries is not allowed for DDL batches.");
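To see the behavior this file enforces from the application side: while a DDL batch is open on a connection, ordinary queries are rejected with FAILED_PRECONDITION, whereas driver-generated metadata lookups are now routed around the batch by the ConnectionImpl changes above. A rough sketch against the connection API, with a placeholder connection URI:

import com.google.cloud.spanner.SpannerException;
import com.google.cloud.spanner.Statement;
import com.google.cloud.spanner.connection.Connection;
import com.google.cloud.spanner.connection.ConnectionOptions;

public class DdlBatchQuerySketch {
  public static void main(String[] args) {
    ConnectionOptions options =
        ConnectionOptions.newBuilder()
            .setUri("cloudspanner:/projects/my-project/instances/my-instance/databases/my-database")
            .build();
    try (Connection connection = options.getConnection()) {
      connection.startBatchDdl();
      try {
        // Application-level queries are still rejected while the DDL batch is active.
        connection.executeQuery(Statement.of("SELECT 1"));
      } catch (SpannerException e) {
        System.out.println("Rejected as expected: " + e.getErrorCode());
      } finally {
        connection.abortBatch();
      }
    }
  }
}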
@@ -78,6 +78,7 @@ class SingleUseTransaction extends AbstractBaseUnitOfWork {
private final TimestampBound readOnlyStaleness;
private final AutocommitDmlMode autocommitDmlMode;
private final boolean returnCommitStats;
private final boolean internalMetdataQuery;
private volatile SettableApiFuture<Timestamp> readTimestamp = null;
private volatile TransactionRunner writeTransaction;
private boolean used = false;
@@ -91,6 +92,7 @@ static class Builder extends AbstractBaseUnitOfWork.Builder<Builder, SingleUseTr
private TimestampBound readOnlyStaleness;
private AutocommitDmlMode autocommitDmlMode;
private boolean returnCommitStats;
private boolean internalMetadataQuery;

private Builder() {}

@@ -133,6 +135,11 @@ Builder setReturnCommitStats(boolean returnCommitStats) {
return this;
}

Builder setInternalMetadataQuery(boolean internalMetadataQuery) {
this.internalMetadataQuery = internalMetadataQuery;
return this;
}

@Override
SingleUseTransaction build() {
Preconditions.checkState(ddlClient != null, "No DDL client specified");
@@ -157,6 +164,7 @@ private SingleUseTransaction(Builder builder) {
this.readOnlyStaleness = builder.readOnlyStaleness;
this.autocommitDmlMode = builder.autocommitDmlMode;
this.returnCommitStats = builder.returnCommitStats;
this.internalMetdataQuery = builder.internalMetadataQuery;
}

@Override
@@ -207,8 +215,11 @@ public ApiFuture<ResultSet> executeQueryAsync(
return executeDmlReturningAsync(callType, statement, options);
}

// Do not use a read-only staleness for internal metadata queries.
final ReadOnlyTransaction currentTransaction =
dbClient.singleUseReadOnlyTransaction(readOnlyStaleness);
internalMetdataQuery
? dbClient.singleUseReadOnlyTransaction()
: dbClient.singleUseReadOnlyTransaction(readOnlyStaleness);
Callable<ResultSet> callable =
() -> {
try {
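The comment and ternary above choose between a strong read and the connection's configured staleness. For reference, the two single-use read-only variants being chosen between are the ones on DatabaseClient; a small illustration (the staleness value is made up for the example):

import com.google.cloud.spanner.DatabaseClient;
import com.google.cloud.spanner.ReadOnlyTransaction;
import com.google.cloud.spanner.TimestampBound;
import java.util.concurrent.TimeUnit;

public class StalenessChoiceSketch {
  // Mirrors the ternary in SingleUseTransaction: internal metadata queries use a
  // strong read, regular queries honor the staleness configured on the connection.
  static ReadOnlyTransaction beginSingleUse(
      DatabaseClient dbClient, boolean internalMetadataQuery, TimestampBound readOnlyStaleness) {
    return internalMetadataQuery
        ? dbClient.singleUseReadOnlyTransaction()
        : dbClient.singleUseReadOnlyTransaction(readOnlyStaleness);
  }

  public static void main(String[] args) {
    // Illustrative staleness a connection might be configured with.
    TimestampBound staleness = TimestampBound.ofExactStaleness(15, TimeUnit.SECONDS);
    System.out.println(staleness);
  }
}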
@@ -1389,7 +1389,7 @@ public void testMergeQueryOptions() {
new ConnectionImpl(
connectionOptions, spannerPool, ddlClient, dbClient, mock(BatchClient.class)) {
@Override
UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork() {
UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork(boolean isInternalMetadataQuery) {
return unitOfWork;
}
}) {
@@ -1498,7 +1498,7 @@ public void testStatementTagAlwaysAllowed() {
new ConnectionImpl(
connectionOptions, spannerPool, ddlClient, dbClient, mock(BatchClient.class)) {
@Override
UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork() {
UnitOfWork getCurrentUnitOfWorkOrStartNewUnitOfWork(boolean isInternalMetadataQuery) {
return unitOfWork;
}
}) {
@@ -1609,7 +1609,7 @@ public void testTransactionTagNotAllowedAfterTransactionStarted() {
new ConnectionImpl(
connectionOptions, spannerPool, ddlClient, dbClient, mock(BatchClient.class)) {
@Override
UnitOfWork createNewUnitOfWork() {
UnitOfWork createNewUnitOfWork(boolean isInternalMetadataQuery) {
return unitOfWork;
}
}) {
@@ -40,15 +40,12 @@
import com.google.cloud.spanner.Dialect;
import com.google.cloud.spanner.ErrorCode;
import com.google.cloud.spanner.Mutation;
import com.google.cloud.spanner.ReadContext;
import com.google.cloud.spanner.ResultSet;
import com.google.cloud.spanner.SpannerBatchUpdateException;
import com.google.cloud.spanner.SpannerException;
import com.google.cloud.spanner.SpannerExceptionFactory;
import com.google.cloud.spanner.Statement;
import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement;
import com.google.cloud.spanner.connection.AbstractStatementParser.StatementType;
import com.google.cloud.spanner.connection.Connection.InternalMetadataQuery;
import com.google.cloud.spanner.connection.UnitOfWork.CallType;
import com.google.cloud.spanner.connection.UnitOfWork.UnitOfWorkState;
import com.google.protobuf.Timestamp;
@@ -157,25 +154,6 @@ public void testExecuteCreateDatabase() {
.parse(Statement.of("CREATE DATABASE foo"))));
}

@Test
public void testExecuteMetadataQuery() {
Statement statement = Statement.of("SELECT * FROM INFORMATION_SCHEMA.TABLES");
ParsedStatement parsedStatement = mock(ParsedStatement.class);
when(parsedStatement.isQuery()).thenReturn(true);
when(parsedStatement.getStatement()).thenReturn(statement);
DatabaseClient dbClient = mock(DatabaseClient.class);
ReadContext singleUse = mock(ReadContext.class);
ResultSet resultSet = mock(ResultSet.class);
when(singleUse.executeQuery(statement)).thenReturn(resultSet);
when(dbClient.singleUse()).thenReturn(singleUse);
DdlBatch batch = createSubject(createDefaultMockDdlClient(), dbClient);
assertThat(
get(batch.executeQueryAsync(
CallType.SYNC, parsedStatement, AnalyzeMode.NONE, InternalMetadataQuery.INSTANCE))
.hashCode(),
is(equalTo(resultSet.hashCode())));
}

@Test
public void testExecuteUpdate() {
DdlBatch batch = createSubject();
