MSQ generates tombstones honoring granularity specified in a `REPLACE` query. (apache#15243)

* MSQ generates tombstones honoring the query's granularity.

This change special-cases only the infinite-interval tombstones; finite-interval
tombstones now use the MSQ query's segment granularity, which is consistent
with how MSQ works. (A sketch of this bucketing follows the notes below.)

* more tests and some cleanup.

* checkstyle

* comment edits

* Throw TooManyBuckets fault based on review; add more tests.

* Add javadocs for both methods while reconciling them.

* review: Move testReplaceTombstonesWithTooManyBucketsThrowsException to MsqFaultsTest

* remove unused imports.

* Move TooManyBucketsException to indexing package for shared exception handling.

* Lower the max bucket limit in tests and fix up the count.

* Advance and count the iterator.

* checkstyle
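
A minimal sketch of the bucketing described in the first note, for orientation only; it is not Druid's TombstoneHelper API. The names (tombstoneBuckets, bucketMillis) are hypothetical, intervals are half-open [start, end) epoch millis, and the granularity is a fixed bucket width, whereas real Druid granularities such as MONTH vary in length. A finite interval to drop is floored to the segment granularity and emitted bucket by bucket, with a cap standing in for Limits.MAX_PARTITION_BUCKETS; infinite interval ends would instead be special-cased into eternity tombstones (not shown).

import java.util.ArrayList;
import java.util.List;

/**
 * Illustration only: granularity-aligned tombstone bucketing with a cap,
 * using hypothetical names rather than Druid's TombstoneHelper API.
 */
public class TombstoneBucketingSketch
{
  // Returns [start, end) buckets covering [dropStart, dropEnd), aligned to bucketMillis.
  static List<long[]> tombstoneBuckets(long dropStart, long dropEnd, long bucketMillis, int maxBuckets)
  {
    final List<long[]> buckets = new ArrayList<>();
    long bucketStart = Math.floorDiv(dropStart, bucketMillis) * bucketMillis; // floor to granularity
    while (bucketStart < dropEnd) {
      if (buckets.size() >= maxBuckets) {
        // Stands in for the TooManyBuckets fault thrown when the cap is exceeded.
        throw new IllegalStateException("Tombstone buckets exceed limit of " + maxBuckets);
      }
      buckets.add(new long[]{bucketStart, bucketStart + bucketMillis});
      bucketStart += bucketMillis;
    }
    return buckets;
  }

  public static void main(String[] args)
  {
    final long day = 86_400_000L;
    // Dropping nine 30-day months at a quarterly (90-day) width yields three
    // quarter-sized tombstones rather than one nine-month tombstone.
    tombstoneBuckets(0, 270 * day, 90 * day, 5_000)
        .forEach(b -> System.out.println(b[0] + "/" + b[1]));
  }
}

Under these assumptions the nine-month drop yields three tombstones, mirroring the updated MSQReplaceTest expectations of 2001-04-01/P3M, 2001-07-01/P3M, and 2001-10-01/P3M in place of the single 2001-04-01/2002-01-01 tombstone.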
abhishekrb19 authored Nov 15, 2023
1 parent 06744d3 commit 2e79fd5
Showing 13 changed files with 600 additions and 105 deletions.
@@ -75,6 +75,7 @@
import org.apache.druid.indexing.common.actions.SegmentTransactionalReplaceAction;
import org.apache.druid.indexing.common.actions.TaskAction;
import org.apache.druid.indexing.common.actions.TaskActionClient;
import org.apache.druid.indexing.common.task.batch.TooManyBucketsException;
import org.apache.druid.indexing.common.task.batch.parallel.TombstoneHelper;
import org.apache.druid.indexing.overlord.SegmentPublishResult;
import org.apache.druid.java.util.common.DateTimes;
@@ -121,6 +122,7 @@
import org.apache.druid.msq.indexing.error.MSQWarningReportLimiterPublisher;
import org.apache.druid.msq.indexing.error.MSQWarnings;
import org.apache.druid.msq.indexing.error.QueryNotSupportedFault;
import org.apache.druid.msq.indexing.error.TooManyBucketsFault;
import org.apache.druid.msq.indexing.error.TooManyWarningsFault;
import org.apache.druid.msq.indexing.error.UnknownFault;
import org.apache.druid.msq.indexing.error.WorkerRpcFailedFault;
@@ -1423,14 +1425,18 @@ private void publishAllSegments(final Set<DataSegment> segments) throws IOException
intervalsToDrop,
destination.getReplaceTimeChunks(),
task.getDataSource(),
destination.getSegmentGranularity()
destination.getSegmentGranularity(),
Limits.MAX_PARTITION_BUCKETS
);
segmentsWithTombstones.addAll(tombstones);
numTombstones = tombstones.size();
}
catch (IllegalStateException e) {
throw new MSQException(e, InsertLockPreemptedFault.instance());
}
catch (TooManyBucketsException e) {
throw new MSQException(e, new TooManyBucketsFault(Limits.MAX_PARTITION_BUCKETS));
}
}

if (segmentsWithTombstones.isEmpty()) {
@@ -91,4 +91,9 @@ public class Limits
* MSQ is able to run async queries
*/
public static final long MAX_SELECT_RESULT_ROWS = 3_000;

/**
* Max number of partition buckets for ingestion queries.
*/
public static final int MAX_PARTITION_BUCKETS = 5_000;
}
@@ -28,9 +28,9 @@
import org.apache.druid.frame.processor.FrameRowTooLargeException;
import org.apache.druid.frame.write.InvalidNullByteException;
import org.apache.druid.frame.write.UnsupportedColumnTypeException;
import org.apache.druid.indexing.common.task.batch.TooManyBucketsException;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.parsers.ParseException;
import org.apache.druid.msq.statistics.TooManyBucketsException;
import org.apache.druid.query.groupby.epinephelinae.UnexpectedMultiValueDimensionException;
import org.apache.druid.sql.calcite.planner.ColumnMappings;

@@ -40,6 +40,7 @@
import org.apache.druid.java.util.common.Either;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.msq.exec.Limits;
import org.apache.druid.msq.input.InputSpec;
import org.apache.druid.msq.input.InputSpecs;
import org.apache.druid.msq.statistics.ClusterByStatisticsCollector;
@@ -81,7 +82,6 @@
*/
public class StageDefinition
{
private static final int PARTITION_STATS_MAX_BUCKETS = 5_000; // Limit for TooManyBuckets
private static final int MAX_PARTITIONS = 25_000; // Limit for TooManyPartitions

// If adding any fields here, add them to builder(StageDefinition) below too.
@@ -344,7 +344,7 @@ public ClusterByStatisticsCollector createResultKeyStatisticsCollector(final int
shuffleSpec.clusterBy(),
signature,
maxRetainedBytes,
PARTITION_STATS_MAX_BUCKETS,
Limits.MAX_PARTITION_BUCKETS,
shuffleSpec.doesAggregate(),
shuffleCheckHasMultipleValues
);
@@ -28,6 +28,7 @@
import org.apache.druid.frame.key.ClusterByPartitions;
import org.apache.druid.frame.key.RowKey;
import org.apache.druid.frame.key.RowKeyReader;
import org.apache.druid.indexing.common.task.batch.TooManyBucketsException;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.Pair;
@@ -20,28 +20,38 @@
package org.apache.druid.msq.exec;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.apache.druid.error.DruidException;
import org.apache.druid.error.DruidExceptionMatcher;
import org.apache.druid.indexing.common.TaskLockType;
import org.apache.druid.indexing.common.actions.RetrieveUsedSegmentsAction;
import org.apache.druid.indexing.common.actions.SegmentAllocateAction;
import org.apache.druid.indexing.common.task.Tasks;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.msq.indexing.error.InsertCannotAllocateSegmentFault;
import org.apache.druid.msq.indexing.error.InsertCannotBeEmptyFault;
import org.apache.druid.msq.indexing.error.InsertTimeNullFault;
import org.apache.druid.msq.indexing.error.InsertTimeOutOfBoundsFault;
import org.apache.druid.msq.indexing.error.TooManyBucketsFault;
import org.apache.druid.msq.indexing.error.TooManyClusteredByColumnsFault;
import org.apache.druid.msq.indexing.error.TooManyColumnsFault;
import org.apache.druid.msq.indexing.error.TooManyInputFilesFault;
import org.apache.druid.msq.indexing.error.TooManyPartitionsFault;
import org.apache.druid.msq.test.MSQTestBase;
import org.apache.druid.msq.test.MSQTestFileUtils;
import org.apache.druid.msq.test.MSQTestTaskActionClient;
import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.partition.DimensionRangeShardSpec;
import org.apache.druid.timeline.partition.LinearShardSpec;
import org.hamcrest.CoreMatchers;
import org.junit.Test;
import org.junit.internal.matchers.ThrowableMessageMatcher;
import org.mockito.ArgumentMatchers;
import org.mockito.Mockito;

import java.io.File;
@@ -403,6 +413,97 @@ public void testReplaceWithAppendAndSharedLocks()
}
}

@Test
public void testReplaceTombstonesWithTooManyBucketsThrowsFault()
{
RowSignature rowSignature = RowSignature.builder()
.add("__time", ColumnType.LONG)
.add("dim1", ColumnType.STRING)
.add("cnt", ColumnType.LONG).build();

// Create a data segment that lies partially outside the generated segments
DataSegment existingDataSegment = DataSegment.builder()
.interval(Intervals.of("2001-01-01T/2003-01-04T"))
.size(50)
.version(MSQTestTaskActionClient.VERSION)
.dataSource("foo1")
.build();

Mockito.doReturn(ImmutableSet.of(existingDataSegment))
.when(testTaskActionClient)
.submit(ArgumentMatchers.isA(RetrieveUsedSegmentsAction.class));

String expectedError = new TooManyBucketsFault(Limits.MAX_PARTITION_BUCKETS).getErrorMessage();

testIngestQuery().setSql(
"REPLACE INTO foo1 "
+ "OVERWRITE WHERE __time >= TIMESTAMP '2000-01-01 00:00:00' and __time < TIMESTAMP '2002-01-01 00:00:00'"
+ "SELECT __time, dim1 , count(*) as cnt "
+ "FROM foo "
+ "WHERE dim1 IS NOT NULL "
+ "GROUP BY 1, 2 "
+ "PARTITIONED by TIME_FLOOR(__time, 'PT1s') "
+ "CLUSTERED by dim1")
.setExpectedDataSource("foo1")
.setExpectedRowSignature(rowSignature)
.setExpectedShardSpec(DimensionRangeShardSpec.class)
.setExpectedExecutionErrorMatcher(
CoreMatchers.allOf(
CoreMatchers.instanceOf(ISE.class),
ThrowableMessageMatcher.hasMessage(
CoreMatchers.containsString(expectedError)
)
)
)
.verifyExecutionError();
}

@Test
public void testReplaceTombstonesWithTooManyBucketsThrowsFault2()
{
RowSignature rowSignature = RowSignature.builder()
.add("__time", ColumnType.LONG)
.add("dim1", ColumnType.STRING)
.add("cnt", ColumnType.LONG).build();

// Create a data segment that lies partially outside the generated segments
DataSegment existingDataSegment = DataSegment.builder()
.interval(Intervals.of("2000-01-01T/2003-01-04T"))
.size(50)
.version(MSQTestTaskActionClient.VERSION)
.dataSource("foo1")
.build();

Mockito.doReturn(ImmutableSet.of(existingDataSegment))
.when(testTaskActionClient)
.submit(ArgumentMatchers.isA(RetrieveUsedSegmentsAction.class));

String expectedError = new TooManyBucketsFault(Limits.MAX_PARTITION_BUCKETS).getErrorMessage();

testIngestQuery().setSql(
"REPLACE INTO foo1 "
+ "OVERWRITE ALL "
+ "SELECT __time, dim1 , count(*) as cnt "
+ "FROM foo "
+ "GROUP BY 1, 2 "
+ "PARTITIONED by HOUR "
+ "CLUSTERED by dim1")
.setExpectedDataSource("foo1")
.setExpectedRowSignature(rowSignature)
.setExpectedShardSpec(DimensionRangeShardSpec.class)
.setExpectedExecutionErrorMatcher(
CoreMatchers.allOf(
CoreMatchers.instanceOf(ISE.class),
ThrowableMessageMatcher.hasMessage(
CoreMatchers.containsString(expectedError)
)
)
)
.verifyExecutionError();
}

private void testLockTypes(TaskLockType contextTaskLockType, String sql, String errorMessage)
{
Map<String, Object> context = new HashMap<>(DEFAULT_MSQ_CONTEXT);
Expand Down
@@ -684,6 +684,58 @@ public void testReplaceTimeChunksLargerThanData()
.verifyResults();
}

@Test
public void testReplaceAllOverEternitySegment()
{
RowSignature rowSignature = RowSignature.builder()
.add("__time", ColumnType.LONG)
.add("m1", ColumnType.FLOAT)
.build();

// Create an eternity data segment, which extends well beyond the generated segment
DataSegment existingDataSegment = DataSegment.builder()
.interval(Intervals.ETERNITY)
.size(50)
.version(MSQTestTaskActionClient.VERSION)
.dataSource("foo")
.build();

Mockito.doReturn(ImmutableSet.of(existingDataSegment))
.when(testTaskActionClient)
.submit(ArgumentMatchers.isA(RetrieveUsedSegmentsAction.class));

testIngestQuery().setSql(" REPLACE INTO foo "
+ "OVERWRITE ALL "
+ "SELECT __time, m1 "
+ "FROM foo "
+ "WHERE __time >= TIMESTAMP '2000-01-01' AND __time < TIMESTAMP '2000-01-03' "
+ "PARTITIONED BY MONTH")
.setExpectedDataSource("foo")
.setExpectedRowSignature(rowSignature)
.setQueryContext(context)
.setExpectedDestinationIntervals(Collections.singletonList(Intervals.ETERNITY))
.setExpectedTombstoneIntervals(
ImmutableSet.of(
Intervals.of("%s/%s", Intervals.ETERNITY.getStart(), "2000-01-01"),
Intervals.of("%s/%s", "2000-02-01", Intervals.ETERNITY.getEnd())
)
)
.setExpectedSegment(ImmutableSet.of(SegmentId.of(
"foo",
Intervals.of("2000-01-01T/P1M"),
"test",
0
)))
.setExpectedResultRows(
ImmutableList.of(
new Object[]{946684800000L, 1.0f},
new Object[]{946771200000L, 2.0f}
)
)
.verifyResults();
}

@Test
public void testReplaceOnFoo1Range()
{
@@ -985,7 +1037,9 @@ public void testReplaceTombstonesOverPartiallyOverlappingSegments()
.setExpectedShardSpec(DimensionRangeShardSpec.class)
.setExpectedTombstoneIntervals(
ImmutableSet.of(
Intervals.of("2001-04-01/2002-01-01")
Intervals.of("2001-04-01/P3M"),
Intervals.of("2001-07-01/P3M"),
Intervals.of("2001-10-01/P3M")
)
)
.setExpectedResultRows(expectedResults)
@@ -21,8 +21,8 @@

import org.apache.druid.frame.processor.FrameRowTooLargeException;
import org.apache.druid.frame.write.UnsupportedColumnTypeException;
import org.apache.druid.indexing.common.task.batch.TooManyBucketsException;
import org.apache.druid.java.util.common.parsers.ParseException;
import org.apache.druid.msq.statistics.TooManyBucketsException;
import org.apache.druid.query.QueryTimeoutException;
import org.apache.druid.query.groupby.epinephelinae.UnexpectedMultiValueDimensionException;
import org.junit.Assert;
@@ -33,6 +33,7 @@
import org.apache.druid.frame.key.KeyTestUtils;
import org.apache.druid.frame.key.RowKey;
import org.apache.druid.frame.key.RowKeyReader;
import org.apache.druid.indexing.common.task.batch.TooManyBucketsException;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.Pair;
import org.apache.druid.java.util.common.StringUtils;
@@ -17,7 +17,7 @@
* under the License.
*/

package org.apache.druid.msq.statistics;
package org.apache.druid.indexing.common.task.batch;

import org.apache.druid.java.util.common.StringUtils;
