Add initial DevServices-like implementation
Adds a new `dev-services` Maven profile, intended to be used in conjunction with the Jetty plugin. A corresponding IntelliJ run configuration is included.
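
For example, assuming the profile and plugin defaults shown in the pom.xml changes below, the API server can be started with something like `mvn jetty:run -P dev-services` (exact invocation may vary), or via the bundled "Jetty w/ Dev Services" run configuration.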

When enabled, PostgreSQL and Redpanda containers are launched automatically and disposed of when the application stops. Dependency-Track is auto-configured to use the containers; no manual configuration is required.

To avoid introducing runtime dependencies on test libraries for production builds, the logic uses reflection to interact with the `testcontainers` library.
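
In essence, the container classes are loaded and driven via reflection, roughly as in the following simplified sketch of the initializer added below (error handling and `Startables.deepStart` omitted; class and method names are those of the Testcontainers API):

    // Sketch only: start a PostgreSQL container without a compile-time
    // dependency on Testcontainers, then read its connection details.
    Class<?> containerClass = Class.forName("org.testcontainers.containers.PostgreSQLContainer");
    AutoCloseable container = (AutoCloseable) containerClass
            .getDeclaredConstructor(String.class)
            .newInstance("postgres:16-alpine");
    containerClass.getMethod("start").invoke(container);
    String jdbcUrl = (String) containerClass.getMethod("getJdbcUrl").invoke(container);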

Topics required by the API server will be created automatically. Database migrations are executed as usual.
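
Topic creation boils down to a single Kafka `AdminClient` call against the Redpanda container's bootstrap servers, condensed here from the initializer below (one partition, replication factor 1; `redpandaBootstrapServers` comes from the started container, checked exceptions omitted):

    // Sketch only: create one of the required topics on the dev broker.
    try (AdminClient adminClient = AdminClient.create(
            Map.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, redpandaBootstrapServers))) {
        adminClient.createTopics(List.of(
                new NewTopic(KafkaTopics.VULN_ANALYSIS_COMMAND.name(), 1, (short) 1)
        )).all().get();
    }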

The containers are not currently discoverable and reusable by the Quarkus-based services. That capability is planned for a future iteration; see DependencyTrack/hyades#1188.

With this functionality, it becomes easier to test features that do not rely on the other Hyades services (e.g. REST API changes) or frontend modifications. In those scenarios, the Docker Compose setup is no longer needed.

Signed-off-by: nscuro <[email protected]>
nscuro committed Jun 15, 2024
1 parent bc0a8be commit 3132a8f
Showing 6 changed files with 290 additions and 11 deletions.
2 changes: 0 additions & 2 deletions .idea/runConfigurations/Jetty.xml


52 changes: 52 additions & 0 deletions .idea/runConfigurations/Jetty_w__Dev_Services.xml


53 changes: 53 additions & 0 deletions pom.xml
@@ -646,6 +646,7 @@
<configuration>
<excludes>
<exclude>org/cyclonedx/proto/**/*</exclude>
<exclude>org/dependencytrack/dev/**/*</exclude>
<exclude>org/dependencytrack/proto/**/*</exclude>
</excludes>
</configuration>
@@ -702,6 +703,13 @@
<webApp>
<contextPath>/</contextPath>
</webApp>
<systemProperties>
<!--
Disable Jetty warnings about JARs being scanned from multiple locations.
https://stackoverflow.com/a/73964232
-->
<org.slf4j.simpleLogger.log.org.eclipse.jetty.annotations.AnnotationParser>ERROR</org.slf4j.simpleLogger.log.org.eclipse.jetty.annotations.AnnotationParser>
</systemProperties>
</configuration>
</plugin>
<plugin>
@@ -755,6 +763,51 @@
<war-embedded-finalname>${project.build.finalName}-apiserver</war-embedded-finalname>
</properties>
</profile>
<profile>
<id>dev-services</id>
<properties>
<!-- Don't compile test classes, we only want to run Jetty. -->
<maven.test.skip>true</maven.test.skip>
</properties>
<dependencies>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>postgresql</artifactId>
<version>${lib.testcontainers.version}</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>redpanda</artifactId>
<version>${lib.testcontainers.version}</version>
<scope>compile</scope>
</dependency>
<dependency>
<!--
Unfortunately required by testcontainers:
https://github.com/testcontainers/testcontainers-java/issues/970
-->
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${lib.junit.version}</version>
<scope>compile</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-maven-plugin</artifactId>
<version>${plugin.jetty.version}</version>
<configuration>
<systemProperties>
<dev.services.enabled>true</dev.services.enabled>
</systemProperties>
</configuration>
</plugin>
</plugins>
</build>
</profile>
</profiles>

</project>
170 changes: 170 additions & 0 deletions src/main/java/org/dependencytrack/dev/DevServicesInitializer.java
@@ -0,0 +1,170 @@
/*
* This file is part of Dependency-Track.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
* Copyright (c) OWASP Foundation. All Rights Reserved.
*/
package org.dependencytrack.dev;

import alpine.Config;
import alpine.common.logging.Logger;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import org.dependencytrack.event.kafka.KafkaTopics;

import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

import static alpine.Config.AlpineKey.DATABASE_PASSWORD;
import static alpine.Config.AlpineKey.DATABASE_URL;
import static alpine.Config.AlpineKey.DATABASE_USERNAME;
import static org.apache.kafka.clients.admin.AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG;
import static org.apache.kafka.common.config.TopicConfig.CLEANUP_POLICY_COMPACT;
import static org.apache.kafka.common.config.TopicConfig.CLEANUP_POLICY_CONFIG;
import static org.dependencytrack.common.ConfigKey.KAFKA_BOOTSTRAP_SERVERS;

/**
* @since 5.5.0
*/
public class DevServicesInitializer implements ServletContextListener {

private static final Logger LOGGER = Logger.getLogger(DevServicesInitializer.class);
private static final String POSTGRES_IMAGE = "postgres:16-alpine";
private static final String REDPANDA_IMAGE = "docker.redpanda.com/vectorized/redpanda:v24.1.7";

private AutoCloseable postgresContainer;
private AutoCloseable redpandaContainer;

@Override
public void contextInitialized(final ServletContextEvent event) {
if (!"true".equals(System.getProperty("dev.services.enabled"))) {
return;
}

final String postgresJdbcUrl;
final String postgresUsername;
final String postgresPassword;
final String redpandaBootstrapServers;
try {
final Class<?> startablesClass = Class.forName("org.testcontainers.lifecycle.Startables");
final Method deepStartMethod = startablesClass.getDeclaredMethod("deepStart", Collection.class);

final Class<?> postgresContainerClass = Class.forName("org.testcontainers.containers.PostgreSQLContainer");
final Constructor<?> postgresContainerConstructor = postgresContainerClass.getDeclaredConstructor(String.class);
postgresContainer = (AutoCloseable) postgresContainerConstructor.newInstance(POSTGRES_IMAGE);

final Class<?> redpandaContainerClass = Class.forName("org.testcontainers.redpanda.RedpandaContainer");
final Constructor<?> redpandaContainerConstructor = redpandaContainerClass.getDeclaredConstructor(String.class);
redpandaContainer = (AutoCloseable) redpandaContainerConstructor.newInstance(REDPANDA_IMAGE);

LOGGER.info("Starting PostgreSQL and Redpanda containers");
final var deepStartFuture = (CompletableFuture<?>) deepStartMethod.invoke(null, List.of(postgresContainer, redpandaContainer));
deepStartFuture.join();

postgresJdbcUrl = (String) postgresContainerClass.getDeclaredMethod("getJdbcUrl").invoke(postgresContainer);
postgresUsername = (String) postgresContainerClass.getDeclaredMethod("getUsername").invoke(postgresContainer);
postgresPassword = (String) postgresContainerClass.getDeclaredMethod("getPassword").invoke(postgresContainer);
redpandaBootstrapServers = (String) redpandaContainerClass.getDeclaredMethod("getBootstrapServers").invoke(redpandaContainer);
} catch (Exception e) {
throw new RuntimeException("Failed to launch containers", e);

Check warning on line 91 in src/main/java/org/dependencytrack/dev/DevServicesInitializer.java

View check run for this annotation

Codacy Production / Codacy Static Code Analysis

src/main/java/org/dependencytrack/dev/DevServicesInitializer.java#L91

Avoid throwing raw exception types.
}

LOGGER.warn("""
Containers are not auto-discoverable by other services yet. \
If interaction with other services is required, please use \
the Docker Compose setup in the DependencyTrack/hyades repository. \
Auto-discovery is worked on in https://github.com/DependencyTrack/hyades/issues/1188.\
""");

final var configOverrides = new Properties();
configOverrides.put(DATABASE_URL.getPropertyName(), postgresJdbcUrl);
configOverrides.put(DATABASE_USERNAME.getPropertyName(), postgresUsername);
configOverrides.put(DATABASE_PASSWORD.getPropertyName(), postgresPassword);
configOverrides.put(KAFKA_BOOTSTRAP_SERVERS.getPropertyName(), redpandaBootstrapServers);

try {
LOGGER.info("Applying config overrides: %s".formatted(configOverrides));
final Field propertiesField = Config.class.getDeclaredField("properties");
propertiesField.setAccessible(true);

final Properties properties = (Properties) propertiesField.get(Config.getInstance());
properties.putAll(configOverrides);
} catch (Exception e) {
throw new RuntimeException("Failed to update configuration", e);

Check warning on line 115 in src/main/java/org/dependencytrack/dev/DevServicesInitializer.java

View check run for this annotation

Codacy Production / Codacy Static Code Analysis

src/main/java/org/dependencytrack/dev/DevServicesInitializer.java#L115

Avoid throwing raw exception types.
}

final var topicsToCreate = new ArrayList<>(List.of(
new NewTopic(KafkaTopics.NEW_EPSS.name(), 1, (short) 1).configs(Map.of(CLEANUP_POLICY_CONFIG, CLEANUP_POLICY_COMPACT)),
new NewTopic(KafkaTopics.NEW_VULNERABILITY.name(), 1, (short) 1).configs(Map.of(CLEANUP_POLICY_CONFIG, CLEANUP_POLICY_COMPACT)),
new NewTopic(KafkaTopics.NOTIFICATION_ANALYZER.name(), 1, (short) 1),
new NewTopic(KafkaTopics.NOTIFICATION_BOM.name(), 1, (short) 1),
new NewTopic(KafkaTopics.NOTIFICATION_CONFIGURATION.name(), 1, (short) 1),
new NewTopic(KafkaTopics.NOTIFICATION_DATASOURCE_MIRRORING.name(), 1, (short) 1),
new NewTopic(KafkaTopics.NOTIFICATION_FILE_SYSTEM.name(), 1, (short) 1),
new NewTopic(KafkaTopics.NOTIFICATION_INTEGRATION.name(), 1, (short) 1),
new NewTopic(KafkaTopics.NOTIFICATION_NEW_VULNERABILITY.name(), 1, (short) 1),
new NewTopic(KafkaTopics.NOTIFICATION_NEW_VULNERABLE_DEPENDENCY.name(), 1, (short) 1),
new NewTopic(KafkaTopics.NOTIFICATION_POLICY_VIOLATION.name(), 1, (short) 1),
new NewTopic(KafkaTopics.NOTIFICATION_PROJECT_AUDIT_CHANGE.name(), 1, (short) 1),
new NewTopic(KafkaTopics.NOTIFICATION_PROJECT_CREATED.name(), 1, (short) 1),
new NewTopic(KafkaTopics.NOTIFICATION_PROJECT_VULN_ANALYSIS_COMPLETE.name(), 1, (short) 1),
new NewTopic(KafkaTopics.NOTIFICATION_REPOSITORY.name(), 1, (short) 1),
new NewTopic(KafkaTopics.NOTIFICATION_VEX.name(), 1, (short) 1),
new NewTopic(KafkaTopics.REPO_META_ANALYSIS_COMMAND.name(), 1, (short) 1),
new NewTopic(KafkaTopics.REPO_META_ANALYSIS_RESULT.name(), 1, (short) 1),
new NewTopic(KafkaTopics.VULN_ANALYSIS_COMMAND.name(), 1, (short) 1),
new NewTopic(KafkaTopics.VULN_ANALYSIS_RESULT.name(), 1, (short) 1),
new NewTopic(KafkaTopics.VULN_ANALYSIS_RESULT_PROCESSED.name(), 1, (short) 1)
));

try (final var adminClient = AdminClient.create(Map.of(BOOTSTRAP_SERVERS_CONFIG, redpandaBootstrapServers))) {
LOGGER.info("Creating topics: %s".formatted(topicsToCreate));
adminClient.createTopics(topicsToCreate).all().get();
} catch (ExecutionException | InterruptedException e) {
throw new RuntimeException("Failed to create topics", e);

Check warning on line 146 in src/main/java/org/dependencytrack/dev/DevServicesInitializer.java

View check run for this annotation

Codacy Production / Codacy Static Code Analysis

src/main/java/org/dependencytrack/dev/DevServicesInitializer.java#L146

Avoid throwing raw exception types.
}
}

@Override
public void contextDestroyed(final ServletContextEvent event) {
if (postgresContainer != null) {
LOGGER.info("Stopping postgres container");
try {
postgresContainer.close();
} catch (Exception e) {
throw new RuntimeException("Failed to stop PostgreSQL container", e);

Check warning on line 157 in src/main/java/org/dependencytrack/dev/DevServicesInitializer.java

View check run for this annotation

Codacy Production / Codacy Static Code Analysis

src/main/java/org/dependencytrack/dev/DevServicesInitializer.java#L157

Avoid throwing raw exception types.
}
}
if (redpandaContainer != null) {
LOGGER.info("Stopping redpanda container");
try {
redpandaContainer.close();
} catch (Exception e) {
throw new RuntimeException("Failed to stop Redpanda container", e);

Check warning on line 165 in src/main/java/org/dependencytrack/dev/DevServicesInitializer.java

View check run for this annotation

Codacy Production / Codacy Static Code Analysis

src/main/java/org/dependencytrack/dev/DevServicesInitializer.java#L165

Avoid throwing raw exception types.
}
}
}

}
20 changes: 12 additions & 8 deletions ProcessorManager.java
@@ -90,7 +90,7 @@ public class ProcessorManager implements AutoCloseable {
private final Map<String, ManagedProcessor> managedProcessors = new LinkedHashMap<>();
private final UUID instanceId;
private final Config config;
private final AdminClient adminClient;
private AdminClient adminClient;

public ProcessorManager() {
this(UUID.randomUUID(), Config.getInstance());
@@ -99,7 +99,6 @@ public ProcessorManager() {
public ProcessorManager(final UUID instanceId, final Config config) {
this.instanceId = instanceId;
this.config = config;
this.adminClient = createAdminClient();
}

/**
@@ -165,8 +164,8 @@ public HealthCheckResponse probeHealth() {
? HealthCheckResponse.Status.UP.name()
: HealthCheckResponse.Status.DOWN.name());
if (isProcessorUp
&& parallelConsumer instanceof final ParallelEoSStreamProcessor<?, ?> concreteParallelConsumer
&& concreteParallelConsumer.getFailureCause() != null) {
&& parallelConsumer instanceof final ParallelEoSStreamProcessor<?, ?> concreteParallelConsumer
&& concreteParallelConsumer.getFailureCause() != null) {
responseBuilder.withData("%s_failure_reason".formatted(processorName),
concreteParallelConsumer.getFailureCause().getMessage());
}
@@ -198,7 +197,7 @@ private void ensureTopicsExist() {
final List<String> topicNames = managedProcessors.values().stream().map(ManagedProcessor::topic).toList();
LOGGER.info("Verifying existence of subscribed topics: %s".formatted(topicNames));

final DescribeTopicsResult topicsResult = adminClient.describeTopics(topicNames, new DescribeTopicsOptions().timeoutMs(3_000));
final DescribeTopicsResult topicsResult = adminClient().describeTopics(topicNames, new DescribeTopicsOptions().timeoutMs(3_000));
final var exceptionsByTopicName = new HashMap<String, Throwable>();
for (final Map.Entry<String, KafkaFuture<TopicDescription>> entry : topicsResult.topicNameValues().entrySet()) {
final String topicName = entry.getKey();
@@ -225,7 +224,7 @@ private void ensureTopicsExist() {

private int getTopicPartitionCount(final String topicName) {
LOGGER.debug("Determining partition count of topic %s".formatted(topicName));
final DescribeTopicsResult topicsResult = adminClient.describeTopics(List.of(topicName), new DescribeTopicsOptions().timeoutMs(3_000));
final DescribeTopicsResult topicsResult = adminClient().describeTopics(List.of(topicName), new DescribeTopicsOptions().timeoutMs(3_000));
final KafkaFuture<TopicDescription> topicDescriptionFuture = topicsResult.topicNameValues().get(topicName);

try {
@@ -343,14 +342,19 @@ private Consumer<byte[], byte[]> createConsumer(final String processorName) {
return consumer;
}

private AdminClient createAdminClient() {
private AdminClient adminClient() {
if (adminClient != null) {
return adminClient;
}

final var adminClientConfig = new HashMap<String, Object>();
adminClientConfig.put(BOOTSTRAP_SERVERS_CONFIG, config.getProperty(KAFKA_BOOTSTRAP_SERVERS));
adminClientConfig.put(CLIENT_ID_CONFIG, "%s-admin-client".formatted(instanceId));
adminClientConfig.putAll(getGlobalTlsConfig());

LOGGER.debug("Creating admin client with options %s".formatted(adminClientConfig));
return AdminClient.create(adminClientConfig);
adminClient = AdminClient.create(adminClientConfig);
return adminClient;
}

private Map<String, Object> getGlobalTlsConfig() {
4 changes: 3 additions & 1 deletion src/main/webapp/WEB-INF/web.xml
@@ -22,7 +22,9 @@
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://xmlns.jcp.org/xml/ns/javaee http://xmlns.jcp.org/xml/ns/javaee/web-app_3_1.xsd"
version="3.1">

<listener>
<listener-class>org.dependencytrack.dev.DevServicesInitializer</listener-class>
</listener>
<listener>
<listener-class>alpine.server.metrics.MetricsInitializer</listener-class>
</listener>
